diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index b87058e0b19c..1a0f571cd1fe 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -610,8 +610,218 @@ }, { "ImportPath": "github.com/docker/distribution", - "Comment": "v2.2.1", - "Rev": "e6c60e79c570f97ef36f280fcebed497682a5f37" + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/configuration", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/context", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/digest", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/health", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/health/checks", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/manifest", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/manifest/manifestlist", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/manifest/schema1", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/manifest/schema2", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/notifications", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/reference", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/api/errcode", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/api/v2", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/auth", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/auth/htpasswd", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/auth/token", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/client", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/client/auth", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/client/transport", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": 
"596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/handlers", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/middleware/registry", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/middleware/repository", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/proxy", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/proxy/scheduler", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/cache", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/cache/memory", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/cache/redis", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/azure", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/base", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/factory", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/filesystem", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/gcs", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/inmemory", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/middleware", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/oss", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": 
"github.com/docker/distribution/registry/storage/driver/s3-aws", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/registry/storage/driver/swift", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/uuid", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" + }, + { + "ImportPath": "github.com/docker/distribution/version", + "Comment": "v2.4.0-rc.1-106-g596ca8b", + "Rev": "596ca8b86acd3feebedae6bc08abf2a48d403a14" }, { "ImportPath": "github.com/docker/distribution/configuration", diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml b/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml deleted file mode 100644 index d943e19ff18e..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/.drone.yml +++ /dev/null @@ -1,38 +0,0 @@ -image: dmp42/go:stable - -script: - # To be spoofed back into the test image - - go get github.com/modocache/gover - - - go get -t ./... - - # Go fmt - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - # Go lint - - test -z "$(golint ./... | tee /dev/stderr)" - # Go vet - - go vet ./... - # Go test - - go test -v -race -cover ./... - # Helper to concatenate reports - - gover - # Send to coverall - - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} - - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck - -notify: - email: - recipients: - - distribution@docker.com - - slack: - team: docker - channel: "#dt" - username: mom - token: {{SLACK_TOKEN}} - on_success: true - on_failure: true diff --git a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap index 191e60cdace7..f0452da61943 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/.mailmap +++ b/Godeps/_workspace/src/github.com/docker/distribution/.mailmap @@ -2,6 +2,7 @@ Stephen J Day Stephen Day Stephen Day Olivier Gambier Olivier Gambier Brian Bland Brian Bland +Brian Bland Brian Bland Josh Hawn Josh Hawn Richard Scothern Richard Richard Scothern Richard Scothern @@ -11,4 +12,5 @@ Jessie Frazelle Sharif Nassar Sharif Nassar Sven Dowideit Sven Dowideit Vincent Giersch Vincent Giersch -davidli davidli \ No newline at end of file +davidli davidli +Omer Cohen Omer Cohen \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS index 4b97cd78d5e8..70d525999bc3 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS +++ b/Godeps/_workspace/src/github.com/docker/distribution/AUTHORS @@ -1,4 +1,5 @@ Aaron Lehmann +Aaron Schlesinger Aaron Vinson Adam Enger Adrian Mouat @@ -7,13 +8,19 @@ Alex Chan Alex Elman amitshukla Amy Lindburg +Andrew Hsu Andrew Meredith +Andrew T Nguyen Andrey Kostov Andy Goldstein +Anis Elleuch Anton Tiurin Antonio Mercado +Antonio Murdaca +Arien Holthuizen Arnaud Porterie Arthur Baars +Asuka Suzuki Avi Miller Ayose Cazorla BadZen @@ -21,8 +28,10 @@ Ben Firshman bin liu Brian Bland burnettk +Carson A Chris Dillon Daisuke Fujita +Daniel Huhn Darren Shepherd Dave Trombley Dave Tucker @@ -33,36 +42,52 @@ davidli Dejan Golja Derek McGowan Diogo Mónica 
+DJ Enriquez Donald Huang Doug Davis +Eric Yang +Fabio Huser farmerworking +Felix Yan Florentin Raud Frederick F. Kautz IV +gabriell nascimento harche Henri Gomez Hu Keping Hua Wang +HuKeping Ian Babrou +igayoso Jack Griffin Jason Freidman Jeff Nickoloff Jessie Frazelle +jhaohai Jianqing Wang +John Starks +Jon Johnson Jon Poler Jonathan Boulle Jordan Liggitt Josh Hawn Julien Fernandez +Keerthan Mala Kelsey Hightower Kenneth Lim +Kenny Leung Li Yi +Liu Hua +liuchang0812 Louis Kottmann Luke Carpenter Mary Anthony Matt Bentley +Matt Duch Matt Moore Matt Robenolt Michael Prokop +Michal Minar Miquel Sabaté Morgan Bauer moxiegirl @@ -73,16 +98,23 @@ Nuutti Kotivuori Oilbeater Olivier Gambier Olivier Jacques +Omer Cohen Patrick Devine +Phil Estes Philip Misiowiec Richard Scothern +Rodolfo Carvalho Rusty Conover +Sean Boran Sebastiaan van Stijn +Serge Dubrouski Sharif Nassar Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn Spencer Rinehart +Stefan Majewsky +Stefan Weil Stephen J Day Sungho Moon Sven Dowideit @@ -93,12 +125,16 @@ Thomas Sjögren Tianon Gravi Tibor Vass Tonis Tiigi +Tony Holdstock-Brown +Trevor Pounds Troels Thomsen Vincent Batts Vincent Demeester Vincent Giersch W. Trevor King +weiyuan.yl xg.song xiekeyang Yann ROBERT yuzou +姜继忠 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md index 1a9ecb744278..7cc7aedffeb1 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/CONTRIBUTING.md @@ -76,7 +76,7 @@ Some simple rules to ensure quick merge: You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. Then you should submit your implementation, clearly linking to the issue (and possible proposal). @@ -90,7 +90,7 @@ It's mandatory to: Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. 
-Have a look at a great, succesful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443) +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) ## Coding Style diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile index 1a5822229ef7..fa9cd4627ef8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile +++ b/Godeps/_workspace/src/github.com/docker/distribution/Dockerfile @@ -1,19 +1,18 @@ -FROM golang:1.5.2 - -RUN apt-get update && \ - apt-get install -y librados-dev apache2-utils && \ - rm -rf /var/lib/apt/lists/* +FROM golang:1.6-alpine ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH -ENV DOCKER_BUILDTAGS include_rados include_oss include_gcs +ENV DOCKER_BUILDTAGS include_oss include_gcs WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml + +RUN set -ex \ + && apk add --no-cache make git + RUN make PREFIX=/go clean binaries VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] -CMD ["/etc/docker/registry/config.yml"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json index 028f99009ddc..56cd2dd9b5e6 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json +++ b/Godeps/_workspace/src/github.com/docker/distribution/Godeps/Godeps.json @@ -1,92 +1,179 @@ { "ImportPath": "github.com/docker/distribution", - "GoVersion": "go1.4.2", + "GoVersion": "go1.6", + "GodepVersion": "v60", "Packages": [ "./..." 
], "Deps": [ { - "ImportPath": "golang.org/x/oauth2", - "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e" + "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", + "Comment": "v1.2-334-g95361a2", + "Rev": "95361a2573b1fa92a00c5fc2707a80308483c6f9" }, { - "ImportPath": "golang.org/x/oauth2/google", - "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e" + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" }, { - "ImportPath": "google.golang.org/api/storage/v1", - "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121" + "ImportPath": "github.com/Sirupsen/logrus/formatters/logstash", + "Comment": "v0.7.3", + "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" }, { - "ImportPath": "google.golang.org/cloud", - "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af" + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "google.golang.org/api", - "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121" + "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "google.golang.org/grpc", - "Rev": "91c8b79535eb6045d70ec671d302213f88a3ab95" + "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "github.com/bradfitz/http2", - "Rev": "f8202bc903bda493ebba4aa54922d78430c2c42f" + "ImportPath": "github.com/aws/aws-sdk-go/aws/client", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "github.com/golang/protobuf", - "Rev": "0f7a9caded1fb3c9cc5a9b4bcf2ff633cc8ae644" + "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "google.golang.org/cloud/storage", - "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af" + "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "github.com/AdRoll/goamz/aws", - "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "github.com/AdRoll/goamz/cloudfront", - "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "github.com/AdRoll/goamz/s3", - "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" + "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc" + "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.7.3", - "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" + "ImportPath": "github.com/aws/aws-sdk-go/aws/request", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": 
"github.com/aws/aws-sdk-go/aws/session", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudfront/sign", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" }, { "ImportPath": "github.com/bugsnag/bugsnag-go", "Comment": "v1.0.2-5-gb1d1530", "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" }, + { + "ImportPath": "github.com/bugsnag/bugsnag-go/errors", + "Comment": "v1.0.2-5-gb1d1530", + "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" + }, { "ImportPath": "github.com/bugsnag/osext", "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" }, { "ImportPath": "github.com/bugsnag/panicwrap", - "Rev": "e5f9854865b9778a45169fc249e99e338d4d6f27" + "Comment": "1.0.0-2-ge2c2850", + "Rev": "e2c28503fcd0675329da73bf48b33404db873782" + }, + { + "ImportPath": "github.com/denverdino/aliyungo/common", + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" }, { "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" }, { "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" + "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" + }, + { + "ImportPath": "github.com/docker/goamz/aws", + "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" }, { - "ImportPath": "github.com/docker/docker/pkg/tarsum", - "Comment": "v1.4.1-3932-gb63ec6e", - "Rev": "b63ec6e4b1f6f5c77a6a74a52fcea9564538c575" + "ImportPath": "github.com/docker/goamz/s3", + "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" }, { "ImportPath": "github.com/docker/libtrust", @@ -100,6 +187,15 @@ "ImportPath": "github.com/garyburd/redigo/redis", "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" }, + { + "ImportPath": "github.com/go-ini/ini", + "Comment": "v1.8.6", + "Rev": 
"afbd495e5aaea13597b5e14fe514ddeaa4d76fc3" + }, + { + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "8d92cf5fc15a4382f8964b08e1f42a75c0591aa3" + }, { "ImportPath": "github.com/gorilla/context", "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" @@ -116,6 +212,11 @@ "ImportPath": "github.com/inconshreveable/mousetrap", "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" }, + { + "ImportPath": "github.com/jmespath/go-jmespath", + "Comment": "0.2.2-12-g0b12d6b", + "Rev": "0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74" + }, { "ImportPath": "github.com/mitchellh/mapstructure", "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" @@ -125,9 +226,8 @@ "Rev": "c54732e87b0b283d1baf0a18db689d0aea460ba3" }, { - "ImportPath": "github.com/noahdesu/go-ceph/rados", - "Comment": "v.0.3.0-29-gb15639c", - "Rev": "b15639c44c05368348355229070361395d9152ee" + "ImportPath": "github.com/ncw/swift/swifttest", + "Rev": "c54732e87b0b283d1baf0a18db689d0aea460ba3" }, { "ImportPath": "github.com/spf13/cobra", @@ -141,6 +241,14 @@ "ImportPath": "github.com/stevvooe/resumable", "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" }, + { + "ImportPath": "github.com/stevvooe/resumable/sha256", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, + { + "ImportPath": "github.com/stevvooe/resumable/sha512", + "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" + }, { "ImportPath": "github.com/yvasiyarov/go-metrics", "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" @@ -164,11 +272,151 @@ }, { "ImportPath": "golang.org/x/net/context", - "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5" + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/context/ctxhttp", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/http2", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/http2/hpack", + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/net/internal/timeseries", + "Rev": "4876518f9e71663000c348837735820161a42df7" }, { "ImportPath": "golang.org/x/net/trace", - "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5" + "Rev": "4876518f9e71663000c348837735820161a42df7" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/google", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/jws", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "golang.org/x/oauth2/jwt", + "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" + }, + { + "ImportPath": "google.golang.org/api/gensupport", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/googleapi", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/api/storage/v1", + "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" + }, + { + "ImportPath": "google.golang.org/appengine", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/app_identity", + "Rev": 
"12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/base", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/datastore", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/log", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/modules", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/appengine/internal/remote_api", + "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" + }, + { + "ImportPath": "google.golang.org/cloud", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/internal/opts", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/cloud/storage", + "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" + }, + { + "ImportPath": "google.golang.org/grpc", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/codes", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/credentials", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/grpclog", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/internal", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/metadata", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/naming", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/peer", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" + }, + { + "ImportPath": "google.golang.org/grpc/transport", + "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" }, { "ImportPath": "gopkg.in/check.v1", diff --git a/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS index bda400150c90..97f415dbfbcd 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS +++ b/Godeps/_workspace/src/github.com/docker/distribution/MAINTAINERS @@ -32,6 +32,11 @@ Email = "aaron.lehmann@docker.com" GitHub = "aaronlehmann" + [people.brianbland] + Name = "Brian Bland" + Email = "brian.bland@docker.com" + GitHub = "BrianBland" + [people.dmcgowan] Name = "Derek McGowan" Email = "derek@mcgstyle.net" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/Makefile index 4604a39a025b..a0602d0b2c01 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/Makefile +++ b/Godeps/_workspace/src/github.com/docker/distribution/Makefile @@ -14,8 +14,8 @@ endif GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" .PHONY: clean all fmt vet lint build test binaries -.DEFAULT: default -all: AUTHORS clean fmt vet fmt lint build test binaries +.DEFAULT: all +all: fmt vet lint build test binaries AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ @@ -24,51 +24,83 @@ AUTHORS: .mailmap .git/HEAD 
version/version.go: ./version/version.sh > $@ -${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go') +# Required for go 1.5 to build +GO15VENDOREXPERIMENT := 1 + +# Package list +PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) + +# Resolving binary dependencies for specific targets +GOLINT := $(shell which golint || echo '') +GODEP := $(shell which godep || echo '') + +${PREFIX}/bin/registry: $(wildcard **/*.go) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry -${PREFIX}/bin/digest: version/version.go $(shell find . -type f -name '*.go') +${PREFIX}/bin/digest: $(wildcard **/*.go) @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest -${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . -type f -name '*.go') +${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go) @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template ./bin/registry-api-descriptor-template $< > $@ -# Depends on binaries because vet will silently fail if it can't load compiled -# imports -vet: binaries +vet: @echo "+ $@" - @go vet ./... + @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) fmt: @echo "+ $@" - @test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \ - echo "+ please format Go code with 'gofmt -s'" + @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ + (echo >&2 "+ please format Go code with 'gofmt -s'" && false) lint: @echo "+ $@" - @test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" + $(if $(GOLINT), , \ + $(error Please install golint: `go get -u github.com/golang/lint/golint`)) + @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" build: @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ./... + @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) test: @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" ./... + @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) test-full: @echo "+ $@" - @go test ./... + @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template @echo "+ $@" clean: @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template" + @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" + +dep-save: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) save $(PKGS) + +dep-restore: + @echo "+ $@" + $(if $(GODEP), , \ + $(error Please install godep: go get github.com/tools/godep)) + @$(GODEP) restore -v + +dep-validate: dep-restore + @echo "+ $@" + @rm -Rf .vendor.bak + @mv vendor .vendor.bak + @rm -Rf Godeps + @$(GODEP) save ./... + @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ + (echo >&2 "+ borked dependencies! 
what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) + @rm -Rf .vendor.bak diff --git a/Godeps/_workspace/src/github.com/docker/distribution/README.md b/Godeps/_workspace/src/github.com/docker/distribution/README.md index a6d51b68e3da..c21d47243aeb 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/README.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/README.md @@ -17,9 +17,9 @@ This repository contains the following components: |**Component** |Description | |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | ### How does this integrate with Docker engine? @@ -58,7 +58,7 @@ For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). ### Who needs to deploy a registry? By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](http://docs.docker.com/installation) gives users this +[Installing Docker](https://docs.docker.com/engine/installation/) gives users this ability. Users can also push images to a repository on Docker's public registry, if they have a [Docker Hub](https://hub.docker.com/) account. @@ -128,4 +128,4 @@ avenues are available for support: ## License -This project is distributed under [Apache License, Version 2.0](LICENSE.md). +This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/blobs.go b/Godeps/_workspace/src/github.com/docker/distribution/blobs.go index 63d4505fcff5..d125330117f8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/blobs.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/blobs.go @@ -9,6 +9,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" ) var ( @@ -40,6 +41,18 @@ func (err ErrBlobInvalidDigest) Error() string { err.Digest, err.Reason) } +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. +type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + // Descriptor describes targeted content. 
Used in conjunction with a blob // store, a descriptor can be used to fetch, store and target any kind of // blob. The struct also describes the wire protocol format. Fields should @@ -56,11 +69,23 @@ type Descriptor struct { // against against this digest. Digest digest.Digest `json:"digest,omitempty"` + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + // NOTE: Before adding a field here, please ensure that all // other options have been exhausted. Much of the type relationships // depend on the simplicity of this type. } +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + // BlobStatter makes blob descriptors available by digest. The service may // provide a descriptor of a different digest if the provided digest is not // canonical. @@ -75,12 +100,9 @@ type BlobDeleter interface { Delete(ctx context.Context, dgst digest.Digest) error } -// BlobEnumerator allows to list blobs in storage. +// BlobEnumerator enables iterating over blobs from storage type BlobEnumerator interface { - // Enumerate calls ingester callback for each digest found in a blob store - // until the callback returns an error or the blob store is processed. - // io.EOF will be returned if all the digests are handled. - Enumerate(ctx context.Context, ingester func(digest.Digest) error) error + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error } // BlobDescriptorService manages metadata about a blob by digest. Most @@ -105,6 +127,11 @@ type BlobDescriptorService interface { Clear(ctx context.Context, dgst digest.Digest) error } +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + // ReadSeekCloser is the primary reader type for blob data, combining // io.ReadSeeker with io.Closer. type ReadSeekCloser interface { @@ -150,20 +177,31 @@ type BlobIngester interface { // returned handle can be written to and later resumed using an opaque // identifier. With this approach, one can Close and Resume a BlobWriter // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context) (BlobWriter, error) + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) // Resume attempts to resume a write to a blob, identified by an id. Resume(ctx context.Context, id string) (BlobWriter, error) } +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + // BlobWriter provides a handle for inserting data into a blob store. // Instances should be obtained from BlobWriteService.Writer and // BlobWriteService.Resume. If supported by the store, a writer can be // recovered with the id. 
type BlobWriter interface { - io.WriteSeeker + io.WriteCloser io.ReaderFrom - io.Closer + + // Size returns the number of bytes written to this blob. + Size() int64 // ID returns the identifier for this writer. The ID can be used with the // Blob service to later resume the write. @@ -188,9 +226,6 @@ type BlobWriter interface { // result in a no-op. This allows use of Cancel in a defer statement, // increasing the assurance that it is correctly called. Cancel(ctx context.Context) error - - // Get a reader to the blob being written by this BlobWriter - Reader() (io.ReadCloser, error) } // BlobService combines the operations to access, read and write blobs. This @@ -202,10 +237,9 @@ type BlobService interface { } // BlobStore represent the entire suite of blob related operations. Such an -// implementation can access, enumerate, read, write, delete and serve blobs. +// implementation can access, read, write, delete and serve blobs. type BlobStore interface { BlobService BlobServer - BlobEnumerator BlobDeleter } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/circle.yml b/Godeps/_workspace/src/github.com/docker/distribution/circle.yml index 30337197aaae..3d1ffd2f06ac 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/circle.yml +++ b/Godeps/_workspace/src/github.com/docker/distribution/circle.yml @@ -3,13 +3,12 @@ machine: pre: # Install gvm - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install ceph to test rados driver & create pool - - sudo -i ~/distribution/contrib/ceph/ci-setup.sh - - ceph osd pool create docker-distribution 1 + # Install codecov for coverage + - pip install --user codecov post: # go - - gvm install go1.5 --prefer-binary --name=stable + - gvm install go1.6 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations @@ -17,11 +16,9 @@ machine: BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME # Trick circle brainflat "no absolute path" behavior BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_rados include_oss include_gcs" + DOCKER_BUILDTAGS: "include_oss include_gcs" # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" - # Ceph config - RADOS_POOL: "docker-distribution" hosts: # Not used yet @@ -45,9 +42,6 @@ dependencies: - > gvm use stable && go get github.com/axw/gocov/gocov github.com/golang/lint/golint - - # Disabling goveralls for now - # go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint test: pre: @@ -55,43 +49,39 @@ test: # - gvm use old && go version - gvm use stable && go version + # Ensure validation of dependencies + - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: + pwd: $BASE_STABLE + # First thing: build everything. This will catch compile errors, and it's # also necessary for go vet to work properly (see #807). - - gvm use stable && godep go install ./...: + - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): pwd: $BASE_STABLE # FMT - - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + - gvm use stable && make fmt: pwd: $BASE_STABLE # VET - - gvm use stable && go vet ./...: + - gvm use stable && make vet: pwd: $BASE_STABLE # LINT - - gvm use stable && test -z "$(golint ./... 
| grep -v Godeps/_workspace/src/ | tee /dev/stderr)": + - gvm use stable && make lint: pwd: $BASE_STABLE override: - # Test stable, and report - # Preset the goverall report file - # - echo "$CIRCLE_PAIN" > ~/goverage.report - - - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out: - pwd: $BASE_STABLE - - - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}: + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': timeout: 600 pwd: $BASE_STABLE post: - # Aggregate and report to coveralls - - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report: + # Report to codecov + - bash <(curl -s https://codecov.io/bash): pwd: $BASE_STABLE ## Notes - # Disabled coveralls reporting: build breaking sending coverage data to coveralls # Disabled the -race detector due to massive memory usage. # Do we want these as well? # - go get code.google.com/p/go.tools/cmd/goimports diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go deleted file mode 100644 index f4870d920f2e..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/digest/main.go +++ /dev/null @@ -1,129 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/version" - "github.com/docker/docker/pkg/tarsum" -) - -var ( - algorithm = digest.Canonical - showVersion bool -) - -type job struct { - name string - reader io.Reader -} - -func init() { - flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)") - flag.Var(&algorithm, "algorithm", "select the digest algorithm") - flag.BoolVar(&showVersion, "version", false, "show the version and exit") - - log.SetFlags(0) - log.SetPrefix(os.Args[0] + ": ") -} - -func usage() { - fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) - fmt.Fprintf(os.Stderr, ` -Calculate the digest of one or more input files, emitting the result -to standard out. If no files are provided, the digest of stdin will -be calculated. - -`) - flag.PrintDefaults() -} - -func unsupported() { - log.Fatalf("unsupported digest algorithm: %v", algorithm) -} - -func main() { - var jobs []job - - flag.Usage = usage - flag.Parse() - if showVersion { - version.PrintVersion() - return - } - - var fail bool // if we fail on one item, foul the exit code - if flag.NArg() > 0 { - for _, path := range flag.Args() { - fp, err := os.Open(path) - - if err != nil { - log.Printf("%s: %v", path, err) - fail = true - continue - } - defer fp.Close() - - jobs = append(jobs, job{name: path, reader: fp}) - } - } else { - // just read stdin - jobs = append(jobs, job{name: "-", reader: os.Stdin}) - } - - digestFn := algorithm.FromReader - - if !algorithm.Available() { - // we cannot digest if is not available. An exception is made for - // tarsum. 
- if !strings.HasPrefix(algorithm.String(), "tarsum") { - unsupported() - } - - var version tarsum.Version - if algorithm == "tarsum" { - // small hack: if we just have tarsum, use latest - version = tarsum.Version1 - } else { - var err error - version, err = tarsum.GetVersionFromTarsum(algorithm.String()) - if err != nil { - unsupported() - } - } - - digestFn = func(rd io.Reader) (digest.Digest, error) { - ts, err := tarsum.NewTarSum(rd, true, version) - if err != nil { - return "", err - } - - if _, err := io.Copy(ioutil.Discard, ts); err != nil { - return "", err - } - - return digest.Digest(ts.Sum(nil)), nil - } - } - - for _, job := range jobs { - dgst, err := digestFn(job.reader) - if err != nil { - log.Printf("%s: %v", job.name, err) - fail = true - continue - } - - fmt.Printf("%v\t%s\n", dgst, job.name) - } - - if fail { - os.Exit(1) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go deleted file mode 100644 index e9cbc42a42d1..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry-api-descriptor-template/main.go +++ /dev/null @@ -1,131 +0,0 @@ -// registry-api-descriptor-template uses the APIDescriptor defined in the -// api/v2 package to execute templates passed to the command line. -// -// For example, to generate a new API specification, one would execute the -// following command from the repo root: -// -// $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md -// -// The templates are passed in the api/v2.APIDescriptor object. Please see the -// package documentation for fields available on that object. The template -// syntax is from Go's standard library text/template package. For information -// on Go's template syntax, please see golang.org/pkg/text/template. -package main - -import ( - "log" - "net/http" - "os" - "path/filepath" - "regexp" - "text/template" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" -) - -var spaceRegex = regexp.MustCompile(`\n\s*`) - -func main() { - - if len(os.Args) != 2 { - log.Fatalln("please specify a template to execute.") - } - - path := os.Args[1] - filename := filepath.Base(path) - - funcMap := template.FuncMap{ - "removenewlines": func(s string) string { - return spaceRegex.ReplaceAllString(s, " ") - }, - "statustext": http.StatusText, - "prettygorilla": prettyGorillaMuxPath, - } - - tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) - - data := struct { - RouteDescriptors []v2.RouteDescriptor - ErrorDescriptors []errcode.ErrorDescriptor - }{ - RouteDescriptors: v2.APIDescriptor.RouteDescriptors, - ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"), - // The following are part of the specification but provided by errcode default. - errcode.ErrorCodeUnauthorized.Descriptor(), - errcode.ErrorCodeDenied.Descriptor(), - errcode.ErrorCodeUnsupported.Descriptor()), - } - - if err := tmpl.Execute(os.Stdout, data); err != nil { - log.Fatalln(err) - } -} - -// prettyGorillaMuxPath removes the regular expressions from a gorilla/mux -// route string, making it suitable for documentation. -func prettyGorillaMuxPath(s string) string { - // Stateful parser that removes regular expressions from gorilla - // routes. It correctly handles balanced bracket pairs. 
- - var output string - var label string - var level int - -start: - if s[0] == '{' { - s = s[1:] - level++ - goto capture - } - - output += string(s[0]) - s = s[1:] - - goto end -capture: - switch s[0] { - case '{': - level++ - case '}': - level-- - - if level == 0 { - s = s[1:] - goto label - } - case ':': - s = s[1:] - goto skip - default: - label += string(s[0]) - } - s = s[1:] - goto capture -skip: - switch s[0] { - case '{': - level++ - case '}': - level-- - } - s = s[1:] - - if level == 0 { - goto label - } - - goto skip -label: - if label != "" { - output += "<" + label + ">" - label = "" - } -end: - if s != "" { - goto start - } - - return output - -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml deleted file mode 100644 index 7a274ea591a9..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-cache.yml +++ /dev/null @@ -1,55 +0,0 @@ -version: 0.1 -log: - level: debug - fields: - service: registry - environment: development -storage: - cache: - blobdescriptor: redis - filesystem: - rootdirectory: /var/lib/registry-cache - maintenance: - uploadpurging: - enabled: false -http: - addr: :5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] -redis: - addr: localhost:6379 - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms -notifications: - endpoints: - - name: local-8082 - url: http://localhost:5003/callback - headers: - Authorization: [Bearer ] - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - - name: local-8083 - url: http://localhost:8083/callback - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true -proxy: - remoteurl: https://registry-1.docker.io - username: username - password: password -health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml deleted file mode 100644 index b6438be50aaa..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-dev.yml +++ /dev/null @@ -1,66 +0,0 @@ -version: 0.1 -log: - level: debug - fields: - service: registry - environment: development - hooks: - - type: mail - disabled: true - levels: - - panic - options: - smtp: - addr: mail.example.com:25 - username: mailuser - password: password - insecure: true - from: sender@example.com - to: - - errors@example.com -storage: - delete: - enabled: true - cache: - blobdescriptor: redis - filesystem: - rootdirectory: /var/lib/registry - maintenance: - uploadpurging: - enabled: false -http: - addr: :5000 - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] -redis: - addr: localhost:6379 - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms -notifications: - endpoints: - - name: local-5003 - url: http://localhost:5003/callback - headers: - Authorization: [Bearer ] - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true - - name: local-8083 - url: http://localhost:8083/callback - timeout: 1s - threshold: 10 - backoff: 1s - disabled: true -health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml deleted file mode 100644 index b5700e1922d2..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/config-example.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 0.1 -log: - fields: - service: registry -storage: - cache: - blobdescriptor: inmemory - filesystem: - rootdirectory: /var/lib/registry -http: - addr: :5000 - headers: - X-Content-Type-Options: [nosniff] -health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go deleted file mode 100644 index 603a44a591f7..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/main.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - _ "net/http/pprof" - - "github.com/docker/distribution/registry" - _ "github.com/docker/distribution/registry/auth/htpasswd" - _ "github.com/docker/distribution/registry/auth/silly" - _ "github.com/docker/distribution/registry/auth/token" - _ "github.com/docker/distribution/registry/proxy" - _ "github.com/docker/distribution/registry/storage/driver/azure" - _ "github.com/docker/distribution/registry/storage/driver/filesystem" - _ "github.com/docker/distribution/registry/storage/driver/gcs" - _ "github.com/docker/distribution/registry/storage/driver/inmemory" - _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" - _ "github.com/docker/distribution/registry/storage/driver/oss" - _ "github.com/docker/distribution/registry/storage/driver/s3" - _ "github.com/docker/distribution/registry/storage/driver/swift" -) - -func main() { - registry.Cmd.Execute() -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go deleted file mode 100644 index e7ea770aebb2..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/cmd/registry/rados.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build include_rados - -package main - -import _ "github.com/docker/distribution/registry/storage/driver/rados" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go index 3dff32f84149..eee83f8a0ff2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration.go @@ -73,6 +73,10 @@ type Configuration struct { // Secret specifies the secret key which HMAC tokens are created with. Secret string `yaml:"secret,omitempty"` + // RelativeURLs specifies that relative URLs should be returned in + // Location headers + RelativeURLs bool `yaml:"relativeurls,omitempty"` + // TLS instructs the http server to listen with a TLS configuration. // This only support simple tls configuration with a cert and key. // Mostly, this is useful for testing situations or simple deployments @@ -145,6 +149,21 @@ type Configuration struct { Health Health `yaml:"health,omitempty"` Proxy Proxy `yaml:"proxy,omitempty"` + + // Compatibility is used for configurations of working with older or deprecated features. 
+ Compatibility struct { + // Schema1 configures how schema1 manifests will be handled + Schema1 struct { + // TrustKey is the signing key to use for adding the signature to + // schema1 manifests. + TrustKey string `yaml:"signingkeyfile,omitempty"` + + // DisableSignatureStore will cause all signatures attached to schema1 manifests + // to be ignored. Signatures will be generated on all schema1 manifest requests + // rather than only requests which converted schema2 to schema1. + DisableSignatureStore bool `yaml:"disablesignaturestore,omitempty"` + } `yaml:"schema1,omitempty"` + } `yaml:"compatibility,omitempty"` } // LogHook is composed of hook Level and Type. @@ -202,7 +221,7 @@ type FileChecker struct { // HTTPChecker is a type of entry in the health section for checking HTTP URIs. type HTTPChecker struct { // Timeout is the duration to wait before timing out the HTTP request - Timeout time.Duration `yaml:"interval,omitempty"` + Timeout time.Duration `yaml:"timeout,omitempty"` // StatusCode is the expected status code StatusCode int // Interval is the duration in between checks @@ -219,7 +238,7 @@ type HTTPChecker struct { // TCPChecker is a type of entry in the health section for checking TCP servers. type TCPChecker struct { // Timeout is the duration to wait before timing out the TCP connection - Timeout time.Duration `yaml:"interval,omitempty"` + Timeout time.Duration `yaml:"timeout,omitempty"` // Interval is the duration in between checks Interval time.Duration `yaml:"interval,omitempty"` // Addr is the TCP address to check diff --git a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go index ced321a6e84e..f406a727f20a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/configuration/configuration_test.go @@ -63,12 +63,13 @@ var configStruct = Configuration{ }, }, HTTP: struct { - Addr string `yaml:"addr,omitempty"` - Net string `yaml:"net,omitempty"` - Host string `yaml:"host,omitempty"` - Prefix string `yaml:"prefix,omitempty"` - Secret string `yaml:"secret,omitempty"` - TLS struct { + Addr string `yaml:"addr,omitempty"` + Net string `yaml:"net,omitempty"` + Host string `yaml:"host,omitempty"` + Prefix string `yaml:"prefix,omitempty"` + Secret string `yaml:"secret,omitempty"` + RelativeURLs bool `yaml:"relativeurls,omitempty"` + TLS struct { Certificate string `yaml:"certificate,omitempty"` Key string `yaml:"key,omitempty"` ClientCAs []string `yaml:"clientcas,omitempty"` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go index 6fe1f817d83c..3b4ab8882f59 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go @@ -1,6 +1,6 @@ // Package context provides several utilities for working with // golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevent request information but this package is not limited to +// logging relevant request information but this package is not limited to // that purpose. 
// // The easiest way to get started is to get the background context: diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go index af4f1351e91e..721964a8489d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go @@ -10,7 +10,7 @@ import ( // WithTrace allocates a traced timing span in a new context. This allows a // caller to track the time between calling WithTrace and the returned done // function. When the done function is called, a log message is emitted with a -// "trace.duration" field, corresponding to the elapased time and a +// "trace.duration" field, corresponding to the elapsed time and a // "trace.func" field, corresponding to the function that called WithTrace. // // The logging keys "trace.id" and "trace.parent.id" are provided to implement diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go index 299edc004575..cb9ef52e38e7 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go @@ -8,25 +8,17 @@ import ( // since that time. If the key is not found, the value returned will be zero. // This is helpful when inferring metrics related to context execution times. func Since(ctx Context, key interface{}) time.Duration { - startedAtI := ctx.Value(key) - if startedAtI != nil { - if startedAt, ok := startedAtI.(time.Time); ok { - return time.Since(startedAt) - } + if startedAt, ok := ctx.Value(key).(time.Time); ok { + return time.Since(startedAt) } - return 0 } // GetStringValue returns a string value from the context. The empty string // will be returned if not found. 
func GetStringValue(ctx Context, key interface{}) (value string) { - stringi := ctx.Value(key) - if stringi != nil { - if valuev, ok := stringi.(string); ok { - value = valuev - } + if valuev, ok := ctx.Value(key).(string); ok { + value = valuev } - return value } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD deleted file mode 100644 index 29f6bae1843f..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/README.MD +++ /dev/null @@ -1,36 +0,0 @@ -# Apache HTTPd sample for Registry v1, v2 and mirror - -3 containers involved - -* Docker Registry v1 (registry 0.9.1) -* Docker Registry v2 (registry 2.0.0) -* Docker Registry v1 in mirror mode - -HTTP for mirror and HTTPS for v1 & v2 - -* http://registry.example.com proxify Docker Registry 1.0 in Mirror mode -* https://registry.example.com proxify Docker Registry 1.0 or 2.0 in Hosting mode - -## 3 Docker containers should be started - -* Docker Registry 1.0 in Mirror mode : port 5001 -* Docker Registry 1.0 in Hosting mode : port 5000 -* Docker Registry 2.0 in Hosting mode : port 5002 - -### Registry v1 - - docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1" - -### Mirror - - docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \ - -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1" - -### Registry v2 - - docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2" - -# For Hosting mode access - -* users should have account (valid-user) to be able to fetch images -* only users using account docker-deployer will be allowed to push images diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf deleted file mode 100644 index 3300a7c0240c..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/apache/apache.conf +++ /dev/null @@ -1,127 +0,0 @@ -# -# Sample Apache 2.x configuration where : -# - - - - ServerName registry.example.com - ServerAlias www.registry.example.com - - ProxyRequests off - ProxyPreserveHost on - - # no proxy for /error/ (Apache HTTPd errors messages) - ProxyPass /error/ ! 
- - ProxyPass /_ping http://localhost:5001/_ping - ProxyPassReverse /_ping http://localhost:5001/_ping - - ProxyPass /v1 http://localhost:5001/v1 - ProxyPassReverse /v1 http://localhost:5001/v1 - - # Logs - ErrorLog ${APACHE_LOG_DIR}/mirror_error_log - CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog - - - - - - - ServerName registry.example.com - ServerAlias www.registry.example.com - - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt - SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key - - # Higher Strength SSL Ciphers - SSLProtocol all -SSLv2 -SSLv3 -TLSv1 - SSLCipherSuite RC4-SHA:HIGH - SSLHonorCipherOrder on - - # Logs - ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log - CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog - - Header always set "Docker-Distribution-Api-Version" "registry/2.0" - Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" - RequestHeader set X-Forwarded-Proto "https" - - ProxyRequests off - ProxyPreserveHost on - - # no proxy for /error/ (Apache HTTPd errors messages) - ProxyPass /error/ ! - - # - # Registry v1 - # - - ProxyPass /v1 http://localhost:5000/v1 - ProxyPassReverse /v1 http://localhost:5000/v1 - - ProxyPass /_ping http://localhost:5000/_ping - ProxyPassReverse /_ping http://localhost:5000/_ping - - # Authentication require for push - - Order deny,allow - Allow from all - AuthName "Registry Authentication" - AuthType basic - AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" - - # Read access to authentified users - - Require valid-user - - - # Write access to docker-deployer account only - - Require user docker-deployer - - - - - # Allow ping to run unauthenticated. - - Satisfy any - Allow from all - - - # Allow ping to run unauthenticated. - - Satisfy any - Allow from all - - - # - # Registry v2 - # - - ProxyPass /v2 http://localhost:5002/v2 - ProxyPassReverse /v2 http://localhost:5002/v2 - - - Order deny,allow - Allow from all - AuthName "Registry Authentication" - AuthType basic - AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" - - # Read access to authentified users - - Require valid-user - - - # Write access to docker-deployer only - - Require user docker-deployer - - - - - - - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh deleted file mode 100644 index d907cf5c30b9..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/ceph/ci-setup.sh +++ /dev/null @@ -1,119 +0,0 @@ -#! 
/bin/bash -# -# Ceph cluster setup in Circle CI -# - -set -x -set -e -set -u - -NODE=$(hostname) -CEPHDIR=/tmp/ceph - -mkdir cluster -pushd cluster - -# Install -retries=0 -until [ $retries -ge 5 ]; do - pip install ceph-deploy && break - retries=$[$retries+1] - sleep 30 -done - -retries=0 -until [ $retries -ge 5 ]; do - ceph-deploy install --release hammer $NODE && break - retries=$[$retries+1] - sleep 30 -done - -retries=0 -until [ $retries -ge 5 ]; do - ceph-deploy pkg --install librados-dev $NODE && break - retries=$[$retries+1] - sleep 30 -done - -echo $(ip route get 1 | awk '{print $NF;exit}') $(hostname) >> /etc/hosts -ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" -cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys -ssh-keyscan $NODE >> ~/.ssh/known_hosts -ceph-deploy new $NODE - -cat >> ceph.conf < 74acc70fa106 - Removing intermediate container edb84c2b40cb - Successfully built 74acc70fa106 - - The commmand outputs its progress until it completes. - -4. Start your configuration with compose. - - $ docker-compose up - Recreating compose_registryv1_1... - Recreating compose_registryv2_1... - Recreating compose_nginx_1... - Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 - ... - - -5. In another terminal, display the running configuration. - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 - 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 - aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 - -### Explore a bit - -1. Check for TLS on your `nginx` server. - - $ curl -v https://localhost:5000 - * Rebuilt URL to: https://localhost:5000/ - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 5000 (#0) - * successfully set certificate verify locations: - * CAfile: none - CApath: /etc/ssl/certs - * SSLv3, TLS handshake, Client hello (1): - * SSLv3, TLS handshake, Server hello (2): - * SSLv3, TLS handshake, CERT (11): - * SSLv3, TLS alert, Server hello (2): - * SSL certificate problem: self signed certificate - * Closing connection 0 - curl: (60) SSL certificate problem: self signed certificate - More details here: http://curl.haxx.se/docs/sslcerts.html - -2. Tag the `v1` registry image. - - $ docker tag registry:latest localhost:5000/registry_one:latest - -2. Push it to the localhost. - - $ docker push localhost:5000/registry_one:latest - - If you are using the 1.6 Docker client, this pushes the image the `v2 `registry. - -4. Use `curl` to list the image in the registry. - - $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list - * Hostname was NOT found in DNS cache - * Trying 127.0.0.1... - * Connected to localhost (127.0.0.1) port 32777 (#0) - > GET /v2/registry1/tags/list HTTP/1.1 - > User-Agent: curl/7.36.0 - > Host: localhost:32777 - > Accept: */* - > - < HTTP/1.1 200 OK - < Content-Type: application/json; charset=utf-8 - < Docker-Distribution-Api-Version: registry/2.0 - < Date: Tue, 14 Apr 2015 22:34:13 GMT - < Content-Length: 39 - < - {"name":"registry1","tags":["latest"]} - * Connection #0 to host localhost left intact - - This example refers to the specific port assigned to the 2.0 registry. You saw - this port earlier, when you used `docker ps` to show your running containers. 
- - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml deleted file mode 100644 index 5cd048588a83..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/docker-compose.yml +++ /dev/null @@ -1,15 +0,0 @@ -nginx: - build: "nginx" - ports: - - "5000:5000" - links: - - registryv1:registryv1 - - registryv2:registryv2 -registryv1: - image: registry - ports: - - "5000" -registryv2: - build: "../../" - ports: - - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile deleted file mode 100644 index 2b252ec7d87f..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM nginx:1.7 - -COPY nginx.conf /etc/nginx/nginx.conf -COPY registry.conf /etc/nginx/conf.d/registry.conf -COPY docker-registry.conf /etc/nginx/docker-registry.conf -COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf deleted file mode 100644 index 65c4d7766e45..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry-v2.conf +++ /dev/null @@ -1,6 +0,0 @@ -proxy_pass http://docker-registry-v2; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf deleted file mode 100644 index 7b039a54ac3a..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/docker-registry.conf +++ /dev/null @@ -1,7 +0,0 @@ -proxy_pass http://docker-registry; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line -proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf deleted file mode 100644 index 63cd180d6b13..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/nginx.conf +++ /dev/null @@ -1,27 +0,0 @@ -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - - 
keepalive_timeout 65; - - include /etc/nginx/conf.d/*.conf; -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf deleted file mode 100644 index 47ffd2379ded..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/compose/nginx/registry.conf +++ /dev/null @@ -1,41 +0,0 @@ -# Docker registry proxy for api versions 1 and 2 - -upstream docker-registry { - server registryv1:5000; -} - -upstream docker-registry-v2 { - server registryv2:5000; -} - -# No client auth or TLS -server { - listen 5000; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { - return 404; - } - - # To add basic authentication to v2 use auth_basic setting plus add_header - # auth_basic "registry.localhost"; - # auth_basic_user_file test.password; - # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; - - include docker-registry-v2.conf; - } - - location / { - include docker-registry.conf; - } -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile deleted file mode 100644 index 8cc504c31c3b..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM debian:jessie - -MAINTAINER Docker Distribution Team - -# compile and runtime deps -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - # For DIND - ca-certificates \ - curl \ - iptables \ - procps \ - e2fsprogs \ - xz-utils \ - # For build - build-essential \ - file \ - git \ - net-tools \ - && apt-get clean && rm -rf /var/lib/apt/lists/* - -# Install Docker -ENV VERSION 1.7.1 -RUN curl -L -o /usr/local/bin/docker https://test.docker.com/builds/Linux/x86_64/docker-${VERSION} \ - && chmod +x /usr/local/bin/docker - -# Install DIND -RUN curl -L -o /dind https://raw.githubusercontent.com/docker/docker/v1.8.1/hack/dind \ - && chmod +x /dind - -# Install bats -RUN cd /usr/local/src/ \ - && git clone https://github.com/sstephenson/bats.git \ - && cd bats \ - && ./install.sh /usr/local - -# Install docker-compose -RUN curl -L https://github.com/docker/compose/releases/download/1.3.3/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose \ - && chmod +x /usr/local/bin/docker-compose - -RUN mkdir -p /go/src/github.com/docker/distribution -WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration - -VOLUME /var/lib/docker - -ENTRYPOINT ["/dind"] diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md deleted file mode 100644 index e12bec1ad51c..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/README.md +++ /dev/null @@ -1,138 
+0,0 @@ -# Docker Registry Integration Testing - -These integration tests cover interactions between the Docker daemon and the -registry server. All tests are run using the docker cli. - -The compose configuration is intended to setup a testing environment for Docker -using multiple registry configurations. These configurations include different -combinations of a v1 and v2 registry as well as TLS configurations. - -## Running inside of Docker -### Get integration container -The container image to run the integation tests will need to be pulled or built -locally. - -*Building locally* -``` -$ docker build -t distribution/docker-integration . -``` - -### Run script - -Invoke the tests within Docker through the `run.sh` script. - -``` -$ ./run.sh -``` - -Run with aufs driver and tmp volume -**NOTE: Using a volume will prevent multiple runs from needing to -re-pull images** -``` -$ DOCKER_GRAPHDRIVER=aufs DOCKER_VOLUME=/tmp/volume ./run.sh -``` - -### Example developer flow - -These tests are useful for developing both as a registry and docker -core developer. The following setup may be used to do integration -testing between development versions - -Insert into your `.zshrc` or `.bashrc` - -``` -# /usr/lib/docker for Docker-in-Docker -# Set this directory to make each invocation run much faster, without -# the need to repull images. -export DOCKER_VOLUME=$HOME/.docker-test-volume - -# Use overlay for all Docker testing, try aufs if overlay not supported -export DOCKER_GRAPHDRIVER=overlay - -# Name this according to personal preference -function rdtest() { - if [ "$1" != "" ]; then - DOCKER_BINARY=$GOPATH/src/github.com/docker/docker/bundles/$1/binary/docker - if [ ! -f $DOCKER_BINARY ]; then - current_version=`cat $GOPATH/src/github.com/docker/docker/VERSION` - echo "$DOCKER_BINARY does not exist" - echo "Current checked out docker version: $current_version" - echo "Checkout desired version and run 'make binary' from $GOPATH/src/github.com/docker/docker" - return 1 - fi - fi - - $GOPATH/src/github.com/docker/distribution/contrib/docker-integration/run.sh -} -``` - -Run with Docker release version -``` -$ rdtest -``` - -Run using local development version of docker -``` -$ cd $GOPATH/src/github.com/docker/docker -$ make binary -$ rdtest `cat VERSION` -``` - -## Running manually outside of Docker - -### Install Docker Compose - -[Docker Compose Installation Guide](http://docs.docker.com/compose/install/) - -### Start compose setup -``` -docker-compose up -``` - -### Install Certificates -The certificates must be installed in /etc/docker/cert.d in order to use TLS -client auth and use the CA certificate. -``` -sudo sh ./install_certs.sh -``` - -### Test with Docker -Tag an image as with any other private registry. Attempt to push the image. - -``` -docker pull hello-world -docker tag hello-world localhost:5440/hello-world -docker push localhost:5440/hello-world - -docker tag hello-world localhost:5441/hello-world -docker push localhost:5441/hello-world -# Perform login using user `testuser` and password `passpassword` -``` - -### Set /etc/hosts entry -Find the non-localhost ip address of local machine - -### Run bats -Run the bats tests after updating /etc/hosts, installing the certificates, and -running the `docker-compose` script. -``` -bats -p . 
-``` - -## Configurations - -Port | V2 | V1 | TLS | Authentication ---- | --- | --- | --- | --- -5000 | yes | yes | no | none -5001 | no | yes | no | none -5002 | yes | no | no | none -5011 | no | yes | yes | none -5440 | yes | yes | yes | none -5441 | yes | yes | yes | basic (testuser/passpassword) -5442 | yes | yes | yes | TLS client -5443 | yes | yes | yes | TLS client (no CA) -5444 | yes | yes | yes | TLS client + basic (testuser/passpassword) -5445 | yes | yes | yes (no CA) | none -5446 | yes | yes | yes (no CA) | basic (testuser/passpassword) -5447 | yes | yes | yes (no CA) | TLS client -5448 | yes | yes | yes (SSLv3) | none diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml deleted file mode 100644 index d664c7bde778..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/docker-compose.yml +++ /dev/null @@ -1,27 +0,0 @@ -nginx: - build: "nginx" - ports: - - "5000:5000" - - "5001:5001" - - "5002:5002" - - "5011:5011" - - "5440:5440" - - "5441:5441" - - "5442:5442" - - "5443:5443" - - "5444:5444" - - "5445:5445" - - "5446:5446" - - "5447:5447" - - "5448:5448" - links: - - registryv1:registryv1 - - registryv2:registryv2 -registryv1: - image: registry:0.9.1 - ports: - - "5000" -registryv2: - build: "../../" - ports: - - "5000" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash deleted file mode 100644 index 60d96ae095c7..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/helpers.bash +++ /dev/null @@ -1,21 +0,0 @@ -# Start docker daemon -function start_daemon() { - # Drivers to use for Docker engines the tests are going to create. - STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} - EXEC_DRIVER=${EXEC_DRIVER:-native} - - docker --daemon --log-level=panic \ - --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & - DOCKER_PID=$! - - # Wait for it to become reachable. 
- tries=10 - until docker version &> /dev/null; do - (( tries-- )) - if [ $tries -le 0 ]; then - echo >&2 "error: daemon failed to start" - exit 1 - fi - sleep 1 - done -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh deleted file mode 100644 index c1fa2b20f3ed..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/install_certs.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -set -e - -hostname=$1 -if [ "$hostname" = "" ]; then - hostname="localhost" -fi - -mkdir -p /etc/docker/certs.d/$hostname:5011 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5011/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5440 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5440/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5441 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5441/ca.crt - -mkdir -p /etc/docker/certs.d/$hostname:5442 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5442/ca.crt -cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5442/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5442/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5443 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5443/ca.crt -cp ./nginx/ssl/registry-noca+client-cert.pem /etc/docker/certs.d/$hostname:5443/client.cert -cp ./nginx/ssl/registry-noca+client-key.pem /etc/docker/certs.d/$hostname:5443/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5444 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5444/ca.crt -cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5444/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5444/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5447 -cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5447/client.cert -cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5447/client.key - -mkdir -p /etc/docker/certs.d/$hostname:5448 -cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5448/ca.crt diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile deleted file mode 100644 index 04515e8c41d6..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM nginx:1.9 - -COPY nginx.conf /etc/nginx/nginx.conf -COPY registry.conf /etc/nginx/conf.d/registry.conf -COPY docker-registry.conf /etc/nginx/docker-registry.conf -COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf -COPY registry-noauth.conf /etc/nginx/registry-noauth.conf -COPY registry-basic.conf /etc/nginx/registry-basic.conf -COPY test.passwd /etc/nginx/test.passwd -COPY ssl /etc/nginx/ssl diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf deleted file mode 100644 index 65c4d7766e45..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry-v2.conf +++ /dev/null @@ -1,6 
+0,0 @@ -proxy_pass http://docker-registry-v2; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf deleted file mode 100644 index 5b1a2d580cf3..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/docker-registry.conf +++ /dev/null @@ -1,7 +0,0 @@ -proxy_pass http://docker-registry; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_set_header Authorization ""; # see https://github.com/docker/docker-registry/issues/170 -proxy_read_timeout 900; diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf deleted file mode 100644 index 63cd180d6b13..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/nginx.conf +++ /dev/null @@ -1,27 +0,0 @@ -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - - keepalive_timeout 65; - - include /etc/nginx/conf.d/*.conf; -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf deleted file mode 100644 index 3c629ae8b2c3..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-basic.conf +++ /dev/null @@ -1,13 +0,0 @@ -client_max_body_size 0; -chunked_transfer_encoding on; -location /v2/ { - auth_basic "registry.localhost"; - auth_basic_user_file test.passwd; - add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; - include docker-registry-v2.conf; -} -location / { - auth_basic "registry.localhost"; - auth_basic_user_file test.passwd; - include docker-registry.conf; -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf deleted file mode 100644 index 883a2d48bb3a..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry-noauth.conf +++ /dev/null @@ -1,8 +0,0 @@ -client_max_body_size 0; -chunked_transfer_encoding on; -location /v2/ { - include docker-registry-v2.conf; -} -location / { - include docker-registry.conf; -} diff --git 
a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf deleted file mode 100644 index b402eacbb8e8..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/registry.conf +++ /dev/null @@ -1,277 +0,0 @@ -# Docker registry proxy for api versions 1 and 2 - -upstream docker-registry { - server registryv1:5000; -} - -upstream docker-registry-v2 { - server registryv2:5000; -} - -# No client auth or TLS -server { - listen 5000; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { - return 404; - } - - include docker-registry-v2.conf; - } - - location / { - include docker-registry.conf; - } -} - -# No client auth or TLS (V1 Only) -server { - listen 5001; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location / { - include docker-registry.conf; - } -} - -# No client auth or TLS (V2 Only) -server { - listen 5002; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location / { - include docker-registry-v2.conf; - } -} - -# TLS localhost (V1 Only) -server { - listen 5011; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - - client_max_body_size 0; - chunked_transfer_encoding on; - location / { - include docker-registry.conf; - } -} - -# TLS localregistry (V1 Only) -server { - listen 5011; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - - client_max_body_size 0; - chunked_transfer_encoding on; - location / { - include docker-registry.conf; - } -} - - - -# TLS Configuration chart -# Username/Password: testuser/passpassword -# | ca | client | basic | notes -# 5440 | yes | no | no | Tests CA certificate -# 5441 | yes | no | yes | Tests basic auth over TLS -# 5442 | yes | yes | no | Tests client auth with client CA -# 5443 | yes | yes | no | Tests client auth without client CA -# 5444 | yes | yes | yes | Tests using basic auth + tls auth -# 5445 | no | no | no | Tests insecure using TLS -# 5446 | no | no | yes | Tests sending credentials to server with insecure TLS -# 5447 | no | yes | no | Tests client auth to insecure -# 5448 | yes | no | no | Bad SSL version - -server { - listen 5440; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - include registry-noauth.conf; -} - -server { - listen 5441; - server_name localhost; 
- ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - include registry-basic.conf; -} - -server { - listen 5442; - listen 5443; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5444; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-basic.conf; -} - -server { - listen 5445; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - include registry-noauth.conf; -} - -server { - listen 5446; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - include registry-basic.conf; -} - -server { - listen 5447; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5448; - server_name localhost; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; - ssl_protocols SSLv3; - include registry-noauth.conf; -} - -# Add configuration for localregistry server_name -# Requires configuring /etc/hosts to use -# Set /etc/hosts entry to external IP, not 127.0.0.1 for testing -# Docker secure/insecure registry features -server { - listen 5440; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - include registry-noauth.conf; -} - -server { - listen 5441; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - include registry-basic.conf; -} - -server { - listen 5442; - listen 5443; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5444; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-basic.conf; -} - -server { - listen 5445; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; - include registry-noauth.conf; -} - -server { - listen 5446; - server_name localregistry; - ssl on; - ssl_certificate 
/etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; - include registry-basic.conf; -} - -server { - listen 5447; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; - ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; - ssl_verify_client on; - include registry-noauth.conf; -} - -server { - listen 5448; - server_name localregistry; - ssl on; - ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; - ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; - ssl_protocols SSLv3; - include registry-noauth.conf; -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem deleted file mode 100644 index 8c9b1bca3273..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV -BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX -GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx -BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT -wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO -cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY -AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR -sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE -CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p -y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk -o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN -NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G -Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/ -BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z -PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU -lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU -GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna -XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi -QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK -UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w -Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH -sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF -AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib -KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA -KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w== ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem deleted file mode 100644 index a239939d0899..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- 
-MIIE9TCCAt+gAwIBAgIRAKbgxG1zgQI81ISaHxqLfpcwCwYJKoZIhvcNAQELMCYx -ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNTA1MjYy -MDU0MjJaFw0xODA1MTAyMDU0MjJaMBMxETAPBgNVBAoTCFF1aWNrVExTMIICIjAN -BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/ -oARHbx59G+GOeGkrwG6ZWSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8M -WpLxp5U9LyYkv0AiSPfT2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/ -MgJbdTylEq1UcZSLMuky+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7II -hGlhziLVTKV9W1RP8Aop8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4 -nFwmuhOo8gvw/HhzYcxyMHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCai -gwUNzfe4/dHeCk/r3pteWOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru -5QqKMrbSlOcd6yHT6NM1ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/ -Vlp5N+WRjDpsBscR8kt2Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoO -nhRqhl2PSphcWdimk8Bwf5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3j -NLQ8EmHWaZlJSeW4BiDYsXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeB -twZJXIXR6Jc8hgsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoG -CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQCl0cTLbLIn -XFuxreei+y6TlG2Z5XcxJ84mr8VLAaQMlJOLZV0O/suFBu9KqBuvPaHhGRnKE2uw -Vxdj9qaDdvmvuzi4jYyUA/sQuqq1+wHwGTadOi9r0IsL8OxzsG16OlhuXzhoQVdw -C9z1jad4HC7uihQ5yhl2ltAA+h5G0Sr1b9El2mx4p6BV+okmTvrqrmjshQb1GZwx -jG6SJ/uvjGf7rn09ZyYafF9ZDTMNodNXjW8orqGlFdXZLPFJ9agUFfwWfqD2lrtm -Fu+Ei0ZvKOtyzmh06eO2aGAHJCBTfcDM4tBKBKp0MOMoZkcQQDNpSyI12j6s1wtx -/1dC8QDyfFpZFXTbKn3q+6MpR+u5zqVquYjwP5DqGTvX0e1sLSthv7LRiOi0qHv1 -bZ8JoWhRMNumui9mzwar5t20ExcWxGxizZY+t+OIj4kaAeRoKK6r6FrYBnTjM+iR -+xtML5UHPOSmYfNcai0Wn4T7hwpgnCJ+K7qGYjFUCarsINppQEwkxHAvuX+asc38 -nA0wd7ByulkMJph0gP6j6LuJf28JODi6EQ7FcQItMeTuPrc+mpqJ4jP7vTTSJG7Q -wvqXLMgFQFR+2PG0s10hbY/Y/nwZAROfAs7ADED+EcDPTl/+XjVyo/aYIeOb/07W -SpS/cacZYUsSLgB4cWbxElcc/p7CW1PbOA== ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem deleted file mode 100644 index acfc9a487319..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/oARHbx59G+GOeGkrwG6Z -WSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8MWpLxp5U9LyYkv0AiSPfT -2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/MgJbdTylEq1UcZSLMuky -+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7IIhGlhziLVTKV9W1RP8Aop -8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4nFwmuhOo8gvw/HhzYcxy -MHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCaigwUNzfe4/dHeCk/r3pte -WOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru5QqKMrbSlOcd6yHT6NM1 -ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/Vlp5N+WRjDpsBscR8kt2 -Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoOnhRqhl2PSphcWdimk8Bw -f5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3jNLQ8EmHWaZlJSeW4BiDY -sXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeBtwZJXIXR6Jc8hgsCAwEA -AQKCAgBJcL1iR5ROMtr0ZNIp4gciALfjQVV3gb48GR/e/9b/LWI0j3i0sOzeLN3h -SLda1fjzOn1Td1ma0dZwmdMUOF+hvhPDYZfzkwWLLkThXgLt/At3rMYstGWa8pN2 -wVUSH7sri7IHmYedP3baQdrHP/9pUsGQc+m8ASTE3i+PFcKbPe5+818HTtRrhVgN -X3oNmPKUNCmSom7ZcKer5P1+Ruum0NuDgomCdkoZgfhjeKeLrVjl/wXDSQL/AhWA -02c4/sML7xx19nl8uf7z+Gj0ir1pvRouhRJTwnRc4KdWu+Yn7WLU8j2ZKf5St/as -zjnpYVEdCp0KSHccgXtobUZDEG2NCHmM6gR2j3qgoUAYjHyqPYlph2r5C47q+p4c -dDWkpwZwGiuYq9qpZj24X6BfppxExcX6AwOgFLZLp80IynwrMVxFsDd2J+KpKRQ1 -+ZtYPcULwInF9MNi/dv84pxGOmmOaIUyjN8Sw4eqANU4T5uvTjUj7Ou6KYyfmxgG 
-y++vjpRN7tN1t1Hwde8SVWobvmhU+5SJVHV8INoJD7uciaevPo9pt833SQTtDXeY -PVBhOKO7thAxdUiqlU/1nGTXnf1VO6wAjaVYoTnP4tJ97WuTptwd2F5znVWHFGVh -lzJAzmFOuyCnRnInsf4n5EmWJnT7XF2CofQqAJ8NIddrU8GnQQKCAQEAyqWAiPMK -I/dMzlS7oJGlhbKZ5R4buc+EoZqtW7/8/S+0L6IaQvpEUilD+aDQyaxXjoKiQQL+ -0UeeSmF/zU5BsOTpB8AuJUfYoUe0N+x7hO5eIcoCB/QWYX+iC3tCN4j1Iwt6VliV -PBYEiLUYPngSIHob/nK8UtgxrWQ3Fik9XJtWhePHrvMvDBalgCKdnyhuucGxKUjc -TtPcyMFdi0z4Kt/FAm+5u/v4ZkO909Ish0FrAqQ9t5ETfvTTTYKBmzny6/LSPTK9 -0XIsHltuC1xG4vGQsES/Ph++Yj3Vn011FqvFZeBUHbfcQuB4h5wcb+90d4GU1kux -eabsHPIZKrlN4QKCAQEA2Fs8NAN5K9i7qbxZCJPi6DJV6XMznk6JVGb+qkkChCyq -IOXb95+c9CIpe6w2d3res3zvML3zbdz2Lyp9G0ve6tSlOaSnHeyIxZ5SRB+yQrcF -GXtsx370bOGjCi1/NH85kwKlMuROFJKleJQv8rKpIEo5aPSPV9Cc/VsUqBpvR+O0 -U1HMv57P4yJA/ddw6imHJBl3jTmWBpK4B+LBsCbdypxdVoO8t32Lb2BqDTaPJfYU -RJUpjn/efLLoP6CWxYtqpUlY5tc7NJGAokl8Fo1mPn02klydvs09uiXE80Li2Hoc -/meMH07Lbt2VTw6iGNRX6VpIHEUZGZeS6rbAvO4ZawKCAQEAjOtGVPXdyWEB0kHu -MBzYY/7tMf0b/rymWNL9Vt5NiauQu8cYSBdNR21WzdLdHkFwqbOCLX9twA7zrnna -q+SNnfuxaShlbptls9HvKyySQMCaSRj3DJzaq3ZcM2vFgmUFQxeKPV1geeY9xOta -LqbExDzmFq2m9F1PPmqAPDL1bt6+7mCVzb1irB9be52WysUNKrPdBP6b5V1DHYAK -EwK1WOs/TxBusqDn/gWBjjmLqYr+ZVndaTfDvPd3sWDdzBoiKZ40QUZ15Z5lu76M -6e2DhfHCUjGcZBEjDaI+WYc9s0REAzJajEf9Lax3ZKZUyCpWbXx5CgSdKCHB8+cP -RTyTQQKCAQEAsxx8r5a8hocLfQ43Kvm7HH0nUHeVoRXlbOFDLNf6ZE/RnCCOxOX3 -esiZTRAZmzo2CaOBJPnr/+SwTgW/woxCBGh8TEc6LnS2GdviwRD4c3CuoRTjzhgU -49q8Ld3SdDRrBoBnIMWOuktY/4S2WRZ9GwU3l+L2lD1Y6gmwBSa1P2+Lxnpupagk -9CVUZpEnokM05LbMmTa2M8Tc43Je5KSYcnaWctvmrIUbnN3VjhC/2y5oQwq1d4n2 -N4eo65vXlbzAUgtxtNEz62YVdsSdHNJ8dXkVZ3+S+/VPh75i2PxjbdFSFW7Futlx -YtvAEs3LdgC8squSDQ1LJTutXfBjiUUX9wKCAQBiCMre86tLyJu6Qb6X1cRAwO7m -4kyGzIUtijXko6mWxb4X/usVvzhSaNVYbHbMZXjX+J5vhBOul+RmQ3EY6nw0H2z8 -9D4z/rnQVqeb0uvIeUhBPni+s4fS4bA92M6Ie5bhiOSF2JjjJr38BFnTZARE7C+7 -ZII7z2c0eQz/wAAt9fWWroAB2mIm6wxq0LNij2NoE0iq6k2xJE1/k8qhXpsN0zAv -bjG72Q7WryBeK/eIDK9e5wGlfLVDOx2Evlcaj70oJxuoRh57e8fCYy8huJQT+Wlx -Qw4zhxiyzAMq8SEqFsm8dVO4Bu2FwzmmehA80ieSb+si7JZU92xGDT394Im2 ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem deleted file mode 100644 index 3ca47f45893c..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFCTCCAvOgAwIBAgIQdcXDOHrLsd2ENSfj5h8ZmjALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQwM1oXDTE4MDUxMDIwNTQwM1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV -BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2K -saEVcHq0eldu5kABbWtZsf9keK7lz8beVIowzOqp5IHpGlggtH7xDVeigA/sLdds -WTgKEOq3zsJzdgfEti5TNAjjmPqjMKkolqv3LXDJG0dZ2GZ8W/eBB6X1wB0LKr3i -ye3/5jb/wCZYVGGMQXj0VQxY8Qq+OHEp0effeheJqA0OYOj+RaZwi20OR/KmJRgY -wXU33bZyapuyT4krhFlFbtzXeKsKQPrT2ePWxPAceqUGUTIqyJySYIw6vb72YxjX -FNRw6Jg7B7RqVJaVCfBrVxtAv+rCLOhUOVYmWhgWEIODPXiqOGwB0VUApAVAYqfi -TYnJIZ7QYLlQx5VPNlzZuSJTUzKmHQLtLcTqdO5HmLxfxc0WuS/ftK916wy/jpSc -m2DiHjIy6aAEaHKGQrNgT+no68kp30xkYAVsIs0BFpl6Q2iNr5e0uKta82A0xU1Q -we7swSHOHCevuDZfFA/CqnBptOjvNUuVytcroCeCrV/ftp75w/Fd9zOcb6LGLxM2 -2UzhkSXl3II250xj74Q3q8T9TDxCLty7oiawhaYKI+8SDYc510EQ7MH46WMO+3Uq -JkpmmELd9POgnnZ1JrCFmf0flUKTi2CqU3wrBPpPMwFBxoFipp5iL87npACHc3DY -6uaoF4Pf9Et1Fd7HRon8RMsKkrSF92NFiBx5UvhZAgMBAAGjNjA0MA4GA1UdDwEB 
-/wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq -hkiG9w0BAQsDggIBAC0F4ci1nqZ9KUhEEAmWmy8g89DovNNIGSC51r2WJ/COmYUX -X70TONscsBL/kx5MK4xoAmb+EN6Yy8i+z9NkNJd0B+2MjXPMFBpgGb0UiPv2wEmZ -5PAKyjwTxNIm6L/nFhkmVqfsQHfjHukXES4C0ff6fj6fuDpBfl5nTlVmc9LpP+hT -5RAwW10qumucGxAWGNBWW+K66cf8O7n/0nQykxJxYjBx16ZB80H2uvqFDKDVFqze -co5M4euXQq9KiXPRlcC9rab2a7FGLHd0TyPkq6TvfsqpxcryyKS4rIAz3sQh/tl/ -/qm1tBcZW2bce3UlF2Wb2dW9HqvIu1O84f6ptLqwgKcIdTbwgQZ0kbFoWE2kWJSV -w+eAFb7tz1LDTpF3NRlz+1K27pBQWRQgcqoIRoQXpC0LfQY9Mp70QIfUQdUh6tnO -8hmq5y623tfxiDwCxb/EOpwCmwK1Cp9cloZTDefVE1r6NkEJWeeHG79VljUGF1KT -NKzXWrrsFtge/hU9Pj+frcZO9qExxPCcsrdZcoK7Ll8s+pjulRvbnCnJkNpeOI3P -iz6+sdGmzKSKg2daRM67Zmy5tmlBEX/eV7kFqt+b3HsdUiLo3Ng2lyPLNNDfwUtB -EukgYGjVJoyqLjLXgsCxLJlk7X/ogVwf8SlAnQ7p6KuxGWm02vlUpEmJp+Hq ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem deleted file mode 100644 index baccc9eb807e..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEArYqxoRVwerR6V27mQAFta1mx/2R4ruXPxt5UijDM6qnkgeka -WCC0fvENV6KAD+wt12xZOAoQ6rfOwnN2B8S2LlM0COOY+qMwqSiWq/ctcMkbR1nY -Znxb94EHpfXAHQsqveLJ7f/mNv/AJlhUYYxBePRVDFjxCr44cSnR5996F4moDQ5g -6P5FpnCLbQ5H8qYlGBjBdTfdtnJqm7JPiSuEWUVu3Nd4qwpA+tPZ49bE8Bx6pQZR -MirInJJgjDq9vvZjGNcU1HDomDsHtGpUlpUJ8GtXG0C/6sIs6FQ5ViZaGBYQg4M9 -eKo4bAHRVQCkBUBip+JNickhntBguVDHlU82XNm5IlNTMqYdAu0txOp07keYvF/F -zRa5L9+0r3XrDL+OlJybYOIeMjLpoARocoZCs2BP6ejrySnfTGRgBWwizQEWmXpD -aI2vl7S4q1rzYDTFTVDB7uzBIc4cJ6+4Nl8UD8KqcGm06O81S5XK1yugJ4KtX9+2 -nvnD8V33M5xvosYvEzbZTOGRJeXcgjbnTGPvhDerxP1MPEIu3LuiJrCFpgoj7xIN -hznXQRDswfjpYw77dSomSmaYQt3086CednUmsIWZ/R+VQpOLYKpTfCsE+k8zAUHG -gWKmnmIvzuekAIdzcNjq5qgXg9/0S3UV3sdGifxEywqStIX3Y0WIHHlS+FkCAwEA -AQKCAgAtZw3V8P/+el1PpqoCsNzpqwvQn36bc3CKvPwtM1tJQa2Q92V3DQdr9rDg -7pjGkankpGorKScH4ZLseLy2h5aKRCZm9PS/DhbbCs1wrDhtO5AxeKYPGhYNiOpx -VvwuHQ/Pohfmdn7KgNrKrW1WIBW5CWN+2X4mq2Gk6aYLHgKZSeB3mf1st6mNRACW -RZg5OZKW3VMv0a/l3cVaeqooXwQ/PtUkXhMp3ILnnKly3Gulzi2gIyj3EQ5vODSe -O3gND/UZOJwwgGG6Aief4fnDc7an+c1OSgBr8OVC21Ys3dfQWWV0os9gVFhymX8k -2AgRf6jP93sFw2NSY34KvcGZpKG59oMDxWF1vPo8sOt17Ey0+qp3eUtB3FfE7Wtf -BaLaD/x4U91izIqOEMzQ6QiZAyvmUoBkUSo125CYuIkt8C8Q1lA1KjihETWF37QR -mr8LUk0A0x3SErtm4wVfeDEqVSfI9gKpk6i6rlUzuCjv58Rc0yyqoghXwBWM4CKj -5ZHYpBKAxj4bM6IrKnodAOcsyVk2c2zVTaMxPhoUj0fF7IE5Hy6YAQ/yBheZEM1v -fhsdBFyS6OqSCnN6UinhH268QPam82lfKTFjW5lOgsSDQZ9rhiWoyamhonJTq65I -nb08f4mzT6OGMwV13zq8dXio6WnUIQAhXdEYWrMBmxp5b6CxAQKCAQEA4kmwV3Nb -n3ZIzVAp2l+yGZwdg4YWzN2kcfdNkL8I+Pn8pWrOwv/uGQYmM0786ys9kB5lu4FR -TMcoEo3AaK/z8N49ro2Kl6HcTmxZgTMr+cl6iwetzqYdkRK7klxyCv5uVloDQDtc -AulDH6RkW9BfRERpi6XtlgiFdJj5jMvXMpwGHX69JVsXb83ZSQESjI2JfO9Y8+4M -a7hNKWW/W0ZBrGCcQQPbgpysfJ+PFKUF/yF1h8SSCdetW2Kv2ix16wL5uHKINYmZ -Y/Om+/AFnUOQlANycgThtgBI5mvg9Khq6W2i/RNcIL7bvwAzq1p+o6cGnImXo4bY -hC4fs2/aeX17UQKCAQEAxFQHSLBYDLal5CQYbHbNZ2sLjwRUraEd/+BA8XoERVVQ -JPihgEvTPEaHnWrFTw0qaGKgMZ5SZCZSWUIfXjYvQIUcEMhNUOHweXhJJhifO5sd -sTuvU7bWg76F69bRKfp8KM266m7qMYv+tNlQ6Kbz/1ImsW00xb86vCK2hPfhldtN -d/iBb4HVDu1uoATHUNuqsSGj/UvttKudQdg7MapzM4N+D4m6rPZUjQmtoMWOXt7R -LYrqEOHWfkxXKlVHw1cL9uzUpArvnR0VcYvGfXiYJFbXWsEB07VxIoLMPEtPbpH9 -YLY37KugrthEVnsbySmZIWCRDEqQuuAaa5o8S1naiQKCAQAiU/dybMebe0A0FVMk 
-E5xbEjnP+AmBbqZBu7iCmthrnNDc70UKg/TEyxAEfJkVu+uM72+TcFy6/wNvPR3R -Q9AH3E8TKdm6gw1+wCUb2n1zWUND0Bhn3v9hQKw/2dJbJJnsc59GoTqmHmjWZgPr -gcLSAmbYjoVqW0STmZlR6KJuxQiQdOeQwS7fASVTU9xSgi43S7/80UIFHWJnQ04y -NIhF9CoAGuuz9ryb80CraxVrzNGdlQ5qe9OKp3/x4wjIbB0iBA3xwTwJ066jTZgs -cVF/gr5b2a28BHMKsZbgxqPhYYZ2SfeR6CJB6W/tML9BaFcybBUa85vpAW5BtFg6 -UfThAoIBAAp1/71byBVFVimF0tdUrTUpewAv1uM5hoOvy0YSnk+jcBXIObLAV40K -pQc6PTEtHmlZd/es2+8CK7kd0NYQRQxHC2vJgHUi1NFkG2GwRivC5B4hdAId5+g1 -KqWaWKLH+f2imKcNKeVh9Dxmp+z9mFquYelqTDmNKvADWX5URuzZNpOB5kOuw098 -TzyvhH9GdR3jEP3aIdxSmJp9jwnibyj7hKgHSq8UoQSy01GRtThQ3wxyLm6f2fH4 -11wmFyDNbpHFpL7o5kOU3SOjsvvUhSbKiccIKbTCIjkYhxFfYegeV0Xj767opjMq -ytlgzeY2FTa2EoR5JKUQc9fv6+6H5yECggEAVVfnywPm8QXn+ByFDdUndZg3uEje -DGyvt1M3mIz5geyRZO8ECzgsZVzKgZC8jDB4lPKz3AGgNlUl/vyGHk6CtW6V6ePA -EXcmOkkMKJQMdopY2/cE6YlSpBGMCcnfothgL0HXxYoop4xVjb74k7tFcNrIDoRx -zp9dSalgxx9aMeaURRbMWf8AhWLZUAjJ/359M1SmcNW619SL3p8Q95Nptvdiltww -lWOCkBdgkjW0mel+Mi2+gY8UPmgNBMPrJ1z9b7b7529YCv5Oci8ABn/N202nhjCp -LupADooNknOMLDyqwRorEv4g6wRjuPIYTIhI9fO5ranu089x+mmGU2tCBw== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem deleted file mode 100644 index ee72c0c6e279..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFETCCAvugAwIBAgIQJ+iLgsp9gA0DmROqW+tHFzALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQxNloXDTE4MDUxMDIwNTQxNlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV -BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQDHR/A6uiQ9X/Xh5ivmdjRr5XVr1D7+fU9Qu6ohArqtBuJsLr6t2RBTS9w6PIAf -xjQSMSFlrm/CY+hbfBMSgm9NeH23o3kYCgoEPhP/634A45W5xwUFno388U8/NHK7 -qwzSP1ezKXfXNvzuo1mZhT08aVdGMOrZUcZZZl8R3RPcIRw9XDSfXKVkMluH6egk -8iLdOxdIdRS58DeSI09FskWe3cIZ5kJmMqnKoIbYSJCVVeYPO0RFlIBi+zpdVyI/ -r9LG0r0plRdz/HJevbOitU2y93S1s9NWMNEkOFU1PFJmsF3ZzNqJFCySj00y/Hcs -jPULYwIxYdqcv16cTNmd3P6FegvuzLJLjNuGaLJGc1antv+p62P7ZdE3DyprFuxs -MJgDL9+NjDaIzoamFf0Uv7K3F7hxrrAHfvm1CMUOyQLg9J6Wl4mLsOy2ZhCbdNFs -T6dobAUGvz4Muj9V8V5pR+nFehjmsPENSsTcs5j0e8zTWtvMFISdS+NZAkpiz0s4 -PV8DLgk5Rp1ZG2V5OnRPLMOTgK0nngc5GVaxf7OYCrFHbBJ8tL93MXNQptNFeBpV -FhjUGqVFcz+6nbFX2NsFLZnghQRs9lej4TTG33NSAYusKqhVwpYFf8CsXCcvYuU6 -RlkCYjr3PB+nX1UDa0eUGm0zOabf9O3D1VzHQBpDuzSHQwIDAQABozowODAOBgNV -HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz -dHJ5MAsGCSqGSIb3DQEBCwOCAgEAaPfAs6saij4FZIPbzAb5M6ZVvfXBg+AfH52t -p3tFsnWUJCiOh9ywsc2NcmJdleKDc4/spElFMUarHqcE1ua6EH15O5GEnHWKj8EY -PVQFrPvf30UkRGNPl8eC7afZtCNk9MLllIATAzBr5Z1i+psV7MmgBKpbZ4B0TnhR -GXNT60QaCJ9RfUuc2z7RHJNo9XTn3Q44X7TFj+P3jHOWzTf8y6Mz6saTy2bugIUy -AfRgRgq/bB8hRjrazg55FIlrMv7dr3J0cIuqmaHfsw7Q2ECMCXW8oQXMBzfuIT0n -sG4u0oVxdNx4OdHsAubGjjwNDhxJvN5j8+YFqZMu03i8LbyamTwsrZg2C3QrRUq8 -SujQEEB+AmO0lpuJ24FsOOYVSYCpLy2ugrKOr2NUqbiBKZs8uBh6RGACfunMZlEw -4BntohiO7oZ5gjvhGZNUEqzMChw7knvVjZ+DkhFk9yE4qIL7VsJSUNI2ZJym/Xeq -jr/oT8CpP8/mFZspa6DFciPfhGLQqKcaZZohL7461pOYWY5C2vsJNR2ucBZzTFvD -BiN/rMnIGFrxUscCCje6RLmrsZ3Lb7bfhB3W6kwzLRfr/XEygAzx6S2mlOM34kqF -HFpKrg9TtLIpYLAKAIfuNbrLaNP1UKh7iLarhDz/qDcvRka/qJTzLD3eLeGXefAP -KjJ1S7s= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem 
b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem deleted file mode 100644 index d20bda0ce715..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAx0fwOrokPV/14eYr5nY0a+V1a9Q+/n1PULuqIQK6rQbibC6+ -rdkQU0vcOjyAH8Y0EjEhZa5vwmPoW3wTEoJvTXh9t6N5GAoKBD4T/+t+AOOVuccF -BZ6N/PFPPzRyu6sM0j9Xsyl31zb87qNZmYU9PGlXRjDq2VHGWWZfEd0T3CEcPVw0 -n1ylZDJbh+noJPIi3TsXSHUUufA3kiNPRbJFnt3CGeZCZjKpyqCG2EiQlVXmDztE -RZSAYvs6XVciP6/SxtK9KZUXc/xyXr2zorVNsvd0tbPTVjDRJDhVNTxSZrBd2cza -iRQsko9NMvx3LIz1C2MCMWHanL9enEzZndz+hXoL7syyS4zbhmiyRnNWp7b/qetj -+2XRNw8qaxbsbDCYAy/fjYw2iM6GphX9FL+ytxe4ca6wB375tQjFDskC4PSelpeJ -i7DstmYQm3TRbE+naGwFBr8+DLo/VfFeaUfpxXoY5rDxDUrE3LOY9HvM01rbzBSE -nUvjWQJKYs9LOD1fAy4JOUadWRtleTp0TyzDk4CtJ54HORlWsX+zmAqxR2wSfLS/ -dzFzUKbTRXgaVRYY1BqlRXM/up2xV9jbBS2Z4IUEbPZXo+E0xt9zUgGLrCqoVcKW -BX/ArFwnL2LlOkZZAmI69zwfp19VA2tHlBptMzmm3/Ttw9Vcx0AaQ7s0h0MCAwEA -AQKCAgBd61qd4vKHdn1kzNztzdHg9BDGFA7oU9iYvQlua2HdgDwgLluxhXa7Oyp8 -y9y6nOgXls4dpPuJCxsMWsqGU7DvOxVNAh9lI/4ah8NXPv5wntIG73Q/dL2Ic5Yc -vLRCHFh7klzb1HRlmsXUFmp4/yGgIil+rDlS2MZ5hdTSj3X3ricoCBfI75oHQfB/ -es7s8q1ZxKqxfHSbOUqHdlq7B0zmla8QE8RBdCkvlT5YGsMBjq1RimYfwOBNRgf4 -y8MZbt0Q1WtPeLPH9zdTzWYnDfmjmhqINEsq+PDoeCA4aciQGxjwOCrapgZnwF/q -4q+r8HbgufXjnjGw5ERLt7BsRSYynoJiTWQ3p/wZ2VLpjFtxYxoJ5/qpQvbZMgGS -Yu3FZNC6cnbOs+JWbdm7Kg93N24cBrGdk/KdEE6lz6uQq07FTSqLtPEQWePzBiuA -1wfP78b2AH6vyJKq36EfMCJK2i7rpwtNz7d9NI5kiLRDB7gesqC94WJ+psEu+ErO -w9DbTV3xdOPs4FGGrR41Hbo8emrk6smhb8+VK2odggi8i2CLAkYupMsuobBlX3CL -hyJPfWDv1aREJ1w7zWVQlJkvp5zR0oXZXpfFxjpj7Ypbp7BKxmh5+WYj8msFDfaD -8VQ+pqgPpdl6zElEq9m5koHjsHH57fMeJQ59HiWpWFur+kQx4QKCAQEA0Jnvbm7R -WypbPDInkIoPDIhyP9Pqv+wMzNfYEnVEG0GhEU/H5aE20a+Dm6u0bsmPm5lCSQsu -EvylTSL3yumQZMincNIUXcPYb2Qye/ZzJnMIibCqwMKQqi4HxCXprWhiEoGPum8A -fN0bTGgMYfM6JZ/Dh1eGsEvemeW+5tn5xZF4Lfp/vkT8v4FuHDydUF/lIx7F5MMi -VteS0hHnR1DuvxHqtysf0wy2l61LFr7mQCMYTNEyFB3ZfXqpxJmFmCqPbr4PQsIm -2rqIDw+13eeoyDpJJkdi+yzHkAYDOdAsur0vOQvK/Zj1QKz9qmC1O6L4BN5yp265 -vjSE4Orvo7btEQKCAQEA9I/afLw6lHUJ4FVL0p7dH15JSFjt7nmGHocE7Wf6Yp3G -vMp+PdGyoJ2KEQB2unnQZK1gZqUuRQLannjNl7fsIiIhHgHxMBCIiylwSUVnP868 -u9/fpJV/cSGze2zF0WAttIgXKNtXG7xMntcY2k+SAe0qjqX494KT0NGnznySt2nU -A1YlkXm6u3KCOJrBKfbtiHXFoH39sA+ihuPiV7xcETS2ZrFdAX9M422p4yDHqe/0 -dTe18wIxJNiEX4xp/HRE//cuQ5dw/Z/QmNrzgWxHbOmXVR5C90vIJRuYY9xz0tDP -LMnifSKfnG16l2gqg7zb8xsxYqSGndXWKPAeiq3/EwKCAQEAhCWQbWgcjmFFzNuE -/ubG48yoe9DW/OAft8Dg68iH7bBkxd/BpbG8VZeXiw16T1i29f5f5IAFnxeX7EbD -rTLLO1113V3ocwH3YZGa/bbBedETzo4xjc1z8asZVmQiJa1ju4+CKrvZFkDH415i -wcZgxqbwKhQDijl1+g52Ii5iMYuXE6GGPVXcu8DVrWOk0N7+/IGpIeOQJG2KYDPh -TOdzZ22FQKY8EeoS3gF0+SLUIDtbUIaR7/Z86iXD2HzdCemkVaZnaoYuMRBL0ybD -sqDn5nguEObWSII0pgN5Fa3QODhS6xOSc5brfx5X0BBVn0L9VbBJ99GIL3t71jRe -vVrL0QKCAQB+jUYZT+ncUqgWruy6g7yW89pmFqagxb/SYjn5g9m8WDq0DPDAmped -p4f/fkbx/gEJZ/I/i3BjA7QPVyHERcdqblDGz2h4X8XYhUv2jnR8P0XIznNTHo1B -BJh04PeIfgWIqveZC8+KqajYdSQGLDC40Ho6MMahha9p2mPEZRAi2x97zoNIQT6Q -qxOZqPMV/RIzkAYBI9E33w9ST/AbSHw35xgQEe23zaEC+wdzYc4QMPxF/9smcdbu -YyA0tVtO6PefoNAO5/nvNFjkEED7kwVu5X2K7Urn3w4lrZ7w5e4FhEoAukN6T4Va -lAhg+uUtIHiM12B50/tZB4N30bFsP9eDAoIBAHc7ppfpo1aDK3bDr6zTSOU4Mn1l -XrfhBJHDy2Wt9WkvWtcCtXr3sDpthaChueV+mGoKvfgWyzUoauO6HDDsRYriqaQB -cXclVjyy+3atY32Opz9rnWefQkbgTOQ+oQgOzEFhxNS+11Omc6ZZ9s31N6TZi/Yz -rgXzhGrr73DkV6uwiiwkvP8vJxg8AMWKorDIm1myr9wwlK5ogDKSku1DM/y1gvlt -4EA39fqURyqxN9o5Yq+8K1+a/smjGx95M+P8Nke4bMs1+lb7bBXbMaVpC6DLqj8B 
-eleOZ7adY2mS0CBuf0PNkJRNDwF1B5VDmGBJLubUtGLuUUoEyUbv66WfnUw= ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem deleted file mode 100644 index ef030f740567..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIE9DCCAt6gAwIBAgIQb58oJ+9SvWUCcYWA+L1oiTALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTUwMFoXDTE4MDUxMDIwNTUwMFowEzERMA8GA1UEChMIUXVpY2tUTFMwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDDmOL3EhBm4So3agPMmF0z1+/nPlrE -xoG7x0HYPk5CP3PF3TNVk3ArBPkMzge0/895a4ZEb9j+LUQEjOZa/ZwuLmSjfJSt -9xTXI1ldp8KasyzQZjC33/bUj7FGxGzgbHyJrGGBoH2W5HdswH4WzhCnGTslyiDo -VN4hklJ7gr+Geq3TPf8Eji+1L71MOrUyoNp7BaQBQT/gKxK0nV+ZuSk6eaiu+om7 -slp3x4bc21o7eIMmNXggJP6p9fMDctnioKhAPcm+5ADiFYSjivLeUQ85VkMTpmdU -yvq6ziK3Ls6erD+S3xLvcHYAaeu84qLd7qdPwkHMTQsDpO4vPMIwL8piMzZV+kwL -Bq+5xk5//FwnQH0pSo2Nr4vRn+DITZc3GKyGUJQoOUgAdfGNskTt8GXa4IsHn5iw -zr12vGaxb//GDm0RLHnh7NVbD8xxDHIJq+fJNFb7MdXa8v31PYebkWuaPhYt6HQC -I/D81zwcJIOGfzNITS2ifM5tvMaUXireo4pLC2v2aSY6RrPq1owlB6jGFwGwZSAF -O6rxSqWO1gLfhJLzqcw/NjWnO7nCZEs/iKgAa22K2CtTt3dDMTvSBYKdkRe/FYQC -MCa7MFJSaH85pYRzoDN4IuVpvROrtuQmlI47oZzb64uCPoA4A8AN+k8iysqITsgK -1m8ePPXhbu4YlwIDAQABozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYI -KwYBBQUHAwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQsDggIBALSgrCdEQd3I -vb/FNkNZkAwdjfBD6j7ZtPBwvjEiiyNTx9hOLBGvbey7kr0HtW0KkLWsdRmCc+3z -ev9I5VjDOtpiqrvuAA1wRBaL3UzGyj/eFjPJpvkfJi8zjkIZ2y18QG3yJ6Eqy6dD -0aIQAHl9hkXMOVrf364gf0p7EoOGtSlfQ56yIGDPTFKKiy+Al0S42p17lhI4coz9 -zGXE1/SiNeZgdsk4zHDqhzzBp8foZuSL1sGcIXHkG8RtqZ1WvCyIPYRyIjIKZcXd -JCEM//EbgDzQ7VE/jm+hIlYfPjM7fmUzsfii+bIrp/0HGEU3HN++LsA6eQOwWPa/ -PrxKPP36EVXb72QK8C3lmz6y+CHhuuAm0C1b1qmYVEs4eRE21S8eB2l0KUlfOecf -xZ1LWp1agKt6fGqRgcsR3/qO27l8W7hlbFNPeOTgr6NQQkEMRW5OxbnZ58ULXqr3 -gWh8Na3D4+3j53035UBBQUMmeeFfWCvtr5n0+6BTAi62Cwwu9QQQBM/2f9/9K+B7 -cW0xPYtczm+VwJL6/rDtNN9xPWitxab1dkZp2XcHG3VWtYvE2R2EtEoKvvCLPggx -zcafsZfcD1wlvtQF7YjykGJnMa0SB0GBl9SQtvGc8PkP39yXHqXZhIoo3fp4qm9v -RfbdpOr8p/Ks34ZqQPukFwpM1s/6aicF ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem deleted file mode 100644 index 5aee3ea5fffa..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAw5ji9xIQZuEqN2oDzJhdM9fv5z5axMaBu8dB2D5OQj9zxd0z -VZNwKwT5DM4HtP/PeWuGRG/Y/i1EBIzmWv2cLi5ko3yUrfcU1yNZXafCmrMs0GYw -t9/21I+xRsRs4Gx8iaxhgaB9luR3bMB+Fs4Qpxk7Jcog6FTeIZJSe4K/hnqt0z3/ -BI4vtS+9TDq1MqDaewWkAUE/4CsStJ1fmbkpOnmorvqJu7Jad8eG3NtaO3iDJjV4 -ICT+qfXzA3LZ4qCoQD3JvuQA4hWEo4ry3lEPOVZDE6ZnVMr6us4ity7Onqw/kt8S -73B2AGnrvOKi3e6nT8JBzE0LA6TuLzzCMC/KYjM2VfpMCwavucZOf/xcJ0B9KUqN -ja+L0Z/gyE2XNxishlCUKDlIAHXxjbJE7fBl2uCLB5+YsM69drxmsW//xg5tESx5 -4ezVWw/McQxyCavnyTRW+zHV2vL99T2Hm5Frmj4WLeh0AiPw/Nc8HCSDhn8zSE0t -onzObbzGlF4q3qOKSwtr9mkmOkaz6taMJQeoxhcBsGUgBTuq8UqljtYC34SS86nM -PzY1pzu5wmRLP4ioAGttitgrU7d3QzE70gWCnZEXvxWEAjAmuzBSUmh/OaWEc6Az 
-eCLlab0Tq7bkJpSOO6Gc2+uLgj6AOAPADfpPIsrKiE7ICtZvHjz14W7uGJcCAwEA -AQKCAgBmIvmxpp8l+cH/ub5OIenZXpMJn4fqZPXtxjjd4HshIN0ln0JlF15lOG2M -gDGKFGKUts8gAX/ACocQETtgnDnn65XlwPIqfXFGflD2FNoLyjBGinY6LhtIF9is -aXmpHz1Q7tDjzZiHKLor8cBlzCjp+MToEMpqR5bO1Qd5M2cro/gM7Lyz9kN3S3x/ -x9BCpbgwsVtYxGfEePmFkwAO159tx4WMCYvOlW2kSm5j+a7+iwmA9D7MGkVZHvNN -A7Y/H0F8ekdVBN5pMG9Yrv/vk0ht2lugcS5YGr4eufFq0mhWdv+jhBTxLzqPMMBG -m9oMJcj8XyXYtwpfVsqBpCqK2wnEnv4Kf0rZzBU706nI2mjPXx3dL+5qo8uQJKNp -mxoS7vmHV5RIJgtdvyzGFHjdfu1leowhV+Jy9jWzMw4wlnmlxsfDECf5RoSf2XGt -SMGJb0dbJKae+W4MfNUFsgAWMZk3h3KF8AHHe44OpDbQeoh3JLnkWSG0oS3CR0ch -68TzCy0SZZEZ9IS+I6o5WVpwWfReCQ5NjaKipWcpiJvxg+Dc3GG3QcVXVz2gGrJh -g9v0v6eyeOJ32QGvvP7THFBjpWeeHlXT8Yz6hFcPrvErEZ029TEmhg8aLWBGfsR5 -F1bazdbqvOSEB9vBAAaddNnEDG9Rl8EmC4WdsnVgYUw1J7gfQQKCAQEA9DKjD9eN -CrUl/2YfSm2WaFhYci74XcHDVeAXN2SbOyKbMIqk3aOFQNRAsLRnwPkdiLtuqeDK -BafrfLTCORHfFdYKnUzmuekESNLckN9VyLztgqOqNAv3LD6GmSHBaJEnUyniLxOL -k0wMEBIsEQw7Fb4blM2REYJ3ZzMFmgpRGnIX8KcxhW9XgSrnqMLO0w6mVxjo7xzd -813nCcNrGhySM/EzKYtTNHy2JZmMH5QFHaIj67KklO7VeEZX5U+TKveBEt4rmHqs -Ndqf/djSs8vu1xse82pVRxMXX2mhDLmwjUjPgWYxUL92jTiyJhE7GxpVB/yHgF1J -Ecb47MDahoNKkQKCAQEAzQzvCOA77IQpGO117GcMqcjzwEUhTytojFBT+s5mHfzk -dYr5TyN86LQ7/GktNoJ5oRvD9UGRSul1OGneivqtWj6mv6/Zvfzacx8NXY4MYFs1 -nEr3Gr7orVFIzD2x7nMPG2G6+J6hZ1rhpnZ9Hprf5G41sHIJxHJ9wTYSUAmFh8bv -FiJqF90bSq/E5hgjphtX6wZWeZYspzc/5+IrJ/I0nqoxV3rjUy234zlzKJAV10sV -5oVgxLLQsUujkHp/Da+ij2aTv1Za8y3PTJ7MAHYgdpa5l/4U9MnPUEB2REBCI1NN -TqxnViwD0xgsvxfb79UzruLJIYOCKvfOumlutXM0pwKCAQBUIMXQhWAP2kyW6mXJ -TGvO0vDVlZz3H/Pdt/AHo19fRhLU7E7UFKupo/YNanl8H9au7nO3jrvKqwkT02o+ -IwwKB81sV7v9PGu/cvWN64MwPvZMVXojqCOlWH0icGCjV66Glh1YPpGNU1ushbYs -wVvxp6b04sUhlSLxqMA7S2aZh8j7nX4QDEXHODLLDyIV0Cw6QViuV/GXEDiyQmK5 -gjJUNrp7i4ZExNozpeyCTIpepSde4hKVRJrCbumFFJ8M5GvRRj0asNh3TTRlTbd5 -Pb6w2KUXEwECFW+t7UQQkEBkzDrAx6YhvXRoPqoRN0p3keDNeZBtBrZPq47CccZX -JRAhAoIBAQCJ/DgnGu54XP9i/PksGrSU1Nvi+SJPKoDyW2QIFTj22SXMS7c1oEYA -OrlbRFPeqLK8zfhyZKsnZC8zxVqy37okTqDbwbSfezZt3emamWqOtRJAmNnsr6fY -aii4+JNySQ9Td9LgV69549iRso7EN6iPCfMrR7J29izWBlMQdTfchOyDUqleYbZp -7hpsVLY4o5HoYJ10uLBX3oAsxTARc5YhZ5pIqjOr18o1KIXsN/napXaZaAwUkdiK -VsI9CZHSXezg30Bxs+UEXEFx6DKT5Oo3o3pFZAAqMlxGPvrXNv7K0tXlKXNos7nn -Jg+GkMG6hRiAibCb0umXjKcbHrQXeu1lAoIBAQDcRBsy6cSQXMSu6+PyroH+2DvR -4fuiMfSrUNjv+9K8gtjYLetrZUvRuFT3A/KzDrALKyTFTGJk3YlpTaC5iNKd+QK8 -6RBJRYeYV16fpX/2ak/8MgfB2gdW//pE0eFjw+qakcUXmo957m7dUXbOrw1VNAET -LVBeVnml+2FUj0sTXGwHKcINPR78PWZ8i1ka9DptnKLBNeA+x+OMkCA88RJJegSk -/rgDDV52z4fJHQJh9TZ7zLAXxGgDFYLGPTrdeT+D/owuPXF+SCP4pMtVnwbQgH9G -dfQ9bb7G14vAeu/kEkFdGFEreS09BOTRbTfzFjFdDvSV4JyOXe9i/sUDxf9R ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem deleted file mode 100644 index 53ce07422fb0..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFCTCCAvOgAwIBAgIQPjclBRGzhznCybQzYRQTyjALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQ1NloXDTE4MDUxMDIwNTQ1NlowJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV -BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALBe -C9O6es+mStDowUd1kiM59VkinzzdHgE24LvKmGxQ6fDnnT8S9L7iyzoxcJWlvSHu -pfyZWvij0ZIyRZ288XemTEFYq25RK0IBGGdvYz9OqT2R3lblBQrXDjSi9WG16sGx 
-60MGhM2egGMqFQ5DBfT16IKw00+RjFgCVzJ8T64Lzw82E0e7d6hl39SPybY+uvrt -SID60hYGmXoOdaiC9qquivks67BZprGNfORrvyJNrCFI6oKUFWHrQ1PpGd2tOwJN -1P3gkkS8pVlAif6ZQkAf+zuKu+l4j5tKxGlJAkJsafVJDLOxBKutUj5msha0g6uJ -gFXUe0+G8hkNcEjd8XqUUCwIOY3pdv4WsydKBk3uH9zMnYolw53k1q0ObvoY1NXf -beMxHQAtDi7nfQGlae9cuuOSymy95WuvzfhZFKdPWUe8lKN9QXFIWVoCFnOm8T3P -+FNCUE+p8DIWkal6Ul9THi/Kz4p7twyrUp1LwT5EtSaJ3iGAmB9I+8/1vmZT3lPi -nX8P+iVGM5yOUnptrsFm0bUcJWRD6iaTK1KxpH+Is4h2kiUiSz1tC/9bKaJYN2o9 -oy7q7+ZVfHSmIxLo8ZFYsaZBcXi96cKuuPMR3X4ISPwKDqP5irxU/QbI+YQBMshg -G4b0BNoMZ50g30r3Hcsifw4pzPQF0RDMOBeCiOi3AgMBAAGjNjA0MA4GA1UdDwEB -/wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq -hkiG9w0BAQsDggIBAFuS/VrMNUwEMyUIktDyna5ExYh/FDOE+YEYf8tsX7dSMhRK -wE560/AcVZcbKKAZOnZ/262a++8tparsQt+bXBJ2so6YUqsFDNdOLCI2aShjWDRe -TNhqmLIO3FNsLRKp96WHVz+jFoiECsoYfKn0jgqTqxx+7nWFqgBaNSlF5cbCgLCH -jQV1uQhzsw/Mh/32hXAidkv/nLeLf7FbKq08hgthtoP+XstlzZ5BxkPodjb8XWXG -DSS49SWX971GHa1apwMKfxVGSppxn18ZwEmW1BUfQBNxtMytqA9DK3+xuoUdXkB0 -iJbm3Jc10JSRju8iyL121Xt6f8O33paVz/ndDJIWztUOjnItc89rxHsINPt5+cUt -jix8ohwmHGDrK7ZooXBvotvmGT/xhPr2eHUAG8JuSJ/Cr09UUOwUEigz4CfgJOHm -XukdzjOkb4r7lhNmVeGqrjRol1W0Wsc1NGH++J6xdkIeQ+i23kHwFHfQWV/J69tm -rOn2N+qijtmbIy9YfVcrFDtUtEAzXylZ2StCVQNofd0M7tXNdrUL8yAFwlrhWGJV -wsSP++1xH2Ie6Diupy8z6rbP383HmnmVPU/UecgLrlX2lEpt/UZkkX1Xm+6PhrrT -HDeeULvqtUP3PD8wS0C873Pl9GXOKISqf0HKEIDUAVZhQOsGFqiZH0388M4L ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem deleted file mode 100644 index 636dc8287f6d..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKgIBAAKCAgEAsF4L07p6z6ZK0OjBR3WSIzn1WSKfPN0eATbgu8qYbFDp8Oed -PxL0vuLLOjFwlaW9Ie6l/Jla+KPRkjJFnbzxd6ZMQVirblErQgEYZ29jP06pPZHe -VuUFCtcONKL1YbXqwbHrQwaEzZ6AYyoVDkMF9PXogrDTT5GMWAJXMnxPrgvPDzYT -R7t3qGXf1I/Jtj66+u1IgPrSFgaZeg51qIL2qq6K+SzrsFmmsY185Gu/Ik2sIUjq -gpQVYetDU+kZ3a07Ak3U/eCSRLylWUCJ/plCQB/7O4q76XiPm0rEaUkCQmxp9UkM -s7EEq61SPmayFrSDq4mAVdR7T4byGQ1wSN3xepRQLAg5jel2/hazJ0oGTe4f3Myd -iiXDneTWrQ5u+hjU1d9t4zEdAC0OLud9AaVp71y645LKbL3la6/N+FkUp09ZR7yU -o31BcUhZWgIWc6bxPc/4U0JQT6nwMhaRqXpSX1MeL8rPinu3DKtSnUvBPkS1Jone -IYCYH0j7z/W+ZlPeU+Kdfw/6JUYznI5Sem2uwWbRtRwlZEPqJpMrUrGkf4iziHaS -JSJLPW0L/1spolg3aj2jLurv5lV8dKYjEujxkVixpkFxeL3pwq648xHdfghI/AoO -o/mKvFT9Bsj5hAEyyGAbhvQE2gxnnSDfSvcdyyJ/DinM9AXREMw4F4KI6LcCAwEA -AQKCAgEAnrHg/oD7ZMEC7PuifoRCHMRYCf5nPkLQbtNMYG2pvT0JY6VlDo4l/2Te -7NvzrBPYHSI55RKwkq4FMwFdNtP+imTulJYOm1MaE2gc52WI7jv/eNE6OQIWCWz8 -8Uv4dBVWyTcos8S31rTaXWBOVejlAUgMERy+5wfWOpLQlzLYF4m0pMFJk/AReUtB -nmhLXlsPsB22cag/RWZmzzcXk6tT/LzVe+R5ptLkdTsUuAxjjaBKVCDiMuDAZL1m -dah3h8oKIMab8l0SABumxKqYAKkyvbSJQUhSUYAT5+3c0cfJ6q7WoMk8TqvnwfpQ -2Klbcaa4G6+79H8e/a41RWmcMVTTpLKmwzx/iMLPswLnTFbWYCsLSsml3OpmXPhG -CKdbIWMvNMBfahZmnCP2pNcZBVY1/k/lEw25ehtnWqA7HplawT6V3gk/Bzz+3e3R -XEpioZF70ipdW5Pb3OG/tKSNDvRRjqLPk9UWlQzmedzu7XN28V/blw/CBVcMAcc0 -njwAledTuqv/wQ67dtbXdcxSPZbV/Rq7y3OmpgK6RWLIFzzpOPW5gULqUZfrnxtv -StxVnlZXhFoymodFobTi7AYibsLaXLkunZWXEwFwdtLfFHznfHq/rHfBmna1lcKW -MgWRqsbaoCsqHC1nc0E4llFkn3zqGYgMQNBeqNfX6cIPI/eQzPECggEBAOk0TP8N -edIFENOrzUtpH1fB3k15heeA84SeBhj8t/xrphR3o+IVO/GtMtq9hVLeYFVPwWCi -Mmy4KhwNUOtFeCSX4MbpiXvoPEjL3QF+Sv95HsEWsT1iBQIN4aoV0ZSv48YsRczs 
-tLjr96hADLTMfpCwyRq9r8XVF/hnx7vqOoOC/J1kteRhjOWRnutFpdAMfkFgzUa9 -1unmDHsDifcT+vpxief9Q9zK9xMYvYmwFkBUjOlhC7WchZC20nrwvM+A2mMBpeLB -WSRWsYeOqW8zcQNGdWuXXMKxsYHwv9tXbANVWxs1gz4x7BxcFoN5poIFrnT+eImY -EwhGrKR6jZsKF00CggEBAMGbdZU0+yvxL2tAul5RGAqv9xhdUV4eg8warTQ8/RWt -8Vef2wllBYnP48rXNDovb7ZNOjMBdjIWZ2zq2McMtHqpzP+zWQWaNT8/7Zi24JTL -y4G75kZdGgTPG2Y71seZoZGxfOu4gf7cLKOqxiHYrNDHEDl5Pi13tJD/8qf6hYm6 -K3yALSv+QlM3mk+5oueKQ7Lj9rV81YomYSV5+K+WhszhvLmuxv0necOLKapeBWvL -GQ5038yAq3PFdu0HXzyA6L8YdusP1d3sqwQvLbi8KAMXJCeT6WZXGYgX2Rjfbuih -ZHUaE7Ac0EsJfMuOowSkS7oXuT81k64ngCoq5KZC5hMCggEBAKYkt9JiZG8HYuSb -GsjmHQllup5RvN+hVF0gRFHbAq2YeBtO3Xg+DpXxAjErIuhWPCWri6bwB6LDVmTj -68milaTke6TbTzLy0rg+Xbcppf766LlCFIYZ5l1/TE3j+4vGAC347sW/wkWY/7lj -4GmS43zsJmqhx6/XUJuOPJOZnZSCZr0vuhL6mOoZZDJUTXy62dx0PetvZsT/O9cM -P2fDWWTCLTEVlBqik4KMdsS4qjGsyzOeCzyZReNDDRO/nZTsRSqSSwARJhQom5Rr -RDVQXeyqbw93KAQhmshroBSB5Rc+4YiyCE3wPTo7NWL38XPi3lbF0VSd/rk/uNH5 -6hcSCmUCggEAIPHjQFCTrRaNiyKolAQYozjuQyceAXYP11tyvcDjEB1ZRB/flemq -15iYmpukN4J67/qUPLmy8zL8xnvwB28SBw195MUQEPP8u5aVR7dW3/sN1jWzKaYO -F2Nmti7YjX6HD9Oz/iiXdlbhAbi9nmTQg3ZcPGt1OSd1gncLQ6pNrvIPFFB7X1EU -2DRN/eMI5X2Rp49DG/7yF2AQh+AJgVeL+LEw/CfRlKJzBeNYY7U8Fuuoh907eAEt -K7YeVpc6jYEiGeJ/2eAH9IuhTkT48saRyHTXoiR5QwDvR0lHmAPtS4irH4Igd4dv -qlUi90B+XPvYJwKCc08aojf2hzZlUiVwIQKCAQEAraCoWea8hLFchxmAiBt7joIg -nNK7a3LOHYxT1gB9H+PoVqTmzGVTeZpD8Jnis/UHmDhRYuUGqvFIefjAWbz0jJAN -t6RMAozENCG1PoeXHf1gt2wspv14kza+8jSdpzNrzZgPZdb7Wh1UEqUkiRYwn87f -C7DHknqCj9S2qq0DFXYz15JNPVrbvD+ZLBFJhTAjppS9TuYQVLf8JPYHpLRio/9A -dMsyOz1VA2RRYN0u/u4ccxiN45K3PbVMCeDPbWXNm8G75YKQ7LnIuehMB1qkZy6N -MOnNGp3l/ZkFK0JsW/pZqTQ2FqSkb0+ttTFApFI3qB04sc4s0uKPI9fa0OQtUw== ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem deleted file mode 100644 index 08c393d1b009..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFETCCAvugAwIBAgIQCnqSQalw9ytL5bHLgHZe+jALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQ1OFoXDTE4MDUxMDIwNTQ1OFowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV -BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQC9gvT3cwz0Ih9+7Ilv5lc15HsEiSmEMh4nOMZrSaamKgf/ydCiGo3DQapr/XDK -FHMLKq68AxwfOlzmEFQ4d9umpPMQ2+4GBr0VG23ppGtQApIPHgD06S0/CeHmDIXN -FXcKybPX/9KbgNkXBWbbJkJy0EcsdP8VJD50Q2WH89nvgEYJNFuKEELD3iGY6bBF -jeDTle5jYA7CgBKvD2avn31g24Qhxn8n8/BdYO/U0kw0qmoy1veLOjCAW0os0jkM -NlKrFpyHEWNj5B3X6UgSn8EGQaVbDq17PrQwlHJYU4nih0TnD1OwvBnFnd27pXjr -68eGA6Zc2BbUnhNGhppWHZ46LpPxpIbafSOH3ES3N/MZAfcUKIUntLlWE2xCQgFV -TW95WeVtP/r1aWgIHu0E2Jb2eHCE+qXYqJxSU7S4DcknmmcTS69hzyHs+92Ec+7Q -m0aQFZ0dyPoYPwXMgZpTAIuXEGg/FKC1fiS/deTW37DyvB2jppehKW3RJY3uso7R -o9vs6DJx1OdU5XEq9R3n7op61N7PK8Wxmn7TVYHEZHkITVvtucZZd1FNTOrOJaNJ -UnE+FuPK1Mrff+jz666Ru4zQL0CondOamX3QR5tuNK6MTqFs87wKY25qsqz7cS27 -kHW+r7UNWbJY3/UQhaPZM78zCZa2IL1nBFUjsFvEA4rtYwIDAQABozowODAOBgNV -HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz -dHJ5MAsGCSqGSIb3DQEBCwOCAgEAHVGMyoyX4lRzWCDkUjrXkrDZzuv03M2ojW2Q -UL61ejMkTWQW8R4gKrcPHAOJAPKVfGEVOrQH3ZMyxV2HnWrJ7egrn65zOzmLbWSh -O7gdpL6YYjBr218fqJn/8HadXZa4k70JyympYOLojeWSLy3KP03U+y7AMcdE1uG6 -6HJI54ZjBoW/nEyWmMh/mfMz8EN+Mgek48Z9AVaOswbtHtDIXN7XO0jbB3DbY5Yh 
-prVqVLYAz4sCchGTadj+aEChF5sJkKREDvAew/njC0WGS2TmMJ+V1uVhXV6354mr -edk79YvdwzwDgeYArkprahMtn9eu1aSTfUXsmM5OP5tR4gyFV1kUmTPY1yUd/yO+ -638wV0mWtGbbf6j8dUKeUBCyt2qGg8J80OUeFdvdHMswtaUq951NApX44BinPkbK -moBVQByZ5OEcmMidFC9SqYSUwTQ7uNyWeguhCXav+l3x900YlKnUQgRUZntPwXjs -yc7MXv0j0E86Gme6G1O02zamwkRgr3qOTHu2oQOow/a24fM4HASayLR0Kegt0sh3 -rzk0HRF1mGonf1Ecyyj/3LpHVsgYSckwtJoZLOqtDMn+CKtOCEByssQfD+E9Qe07 -qMyvcwpXUpfqe3ZERbJ10m98Z88VeK/XGt9ptq7HY47n1KL6lx3oyXwZIw8pq928 -89dcqL0= ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem deleted file mode 100644 index 205511afb41a..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAvYL093MM9CIffuyJb+ZXNeR7BIkphDIeJzjGa0mmpioH/8nQ -ohqNw0Gqa/1wyhRzCyquvAMcHzpc5hBUOHfbpqTzENvuBga9FRtt6aRrUAKSDx4A -9OktPwnh5gyFzRV3Csmz1//Sm4DZFwVm2yZCctBHLHT/FSQ+dENlh/PZ74BGCTRb -ihBCw94hmOmwRY3g05XuY2AOwoASrw9mr599YNuEIcZ/J/PwXWDv1NJMNKpqMtb3 -izowgFtKLNI5DDZSqxachxFjY+Qd1+lIEp/BBkGlWw6tez60MJRyWFOJ4odE5w9T -sLwZxZ3du6V46+vHhgOmXNgW1J4TRoaaVh2eOi6T8aSG2n0jh9xEtzfzGQH3FCiF -J7S5VhNsQkIBVU1veVnlbT/69WloCB7tBNiW9nhwhPql2KicUlO0uA3JJ5pnE0uv -Yc8h7PvdhHPu0JtGkBWdHcj6GD8FzIGaUwCLlxBoPxSgtX4kv3Xk1t+w8rwdo6aX -oSlt0SWN7rKO0aPb7OgycdTnVOVxKvUd5+6KetTezyvFsZp+01WBxGR5CE1b7bnG -WXdRTUzqziWjSVJxPhbjytTK33/o8+uukbuM0C9AqJ3Tmpl90EebbjSujE6hbPO8 -CmNuarKs+3Etu5B1vq+1DVmyWN/1EIWj2TO/MwmWtiC9ZwRVI7BbxAOK7WMCAwEA -AQKCAgEArwqno2uEGnbuKnjmVRInmWKpcb4TN8Rm74lUVEKaB76o1s0cxK3MJP6h -H8/e/vg2bqkE7indLsbkiaepcuLaYijXTcomJzDQMw+7zOOOLz/Aku/+qDg8D47c -NXV5nLzn0HIPiEIF0JYJbmcR4veKxqu0Ic8K0QdCHHcn75P/x2Tuy4+twW9Vi76/ -v5KRuxzZ/fTtVKKj32kWWNXb3fltgCoh+GR0jH2XlVh1DVkVBEwnfT/rM5ESvWwU -riOah7ohT1+6QlOAPwKzwfr6FCG000eNKPb8q+p12q0ylHzMzgxtSxJwFb0X/Nzc -snaboyWLjDAQ2I7LP6WmXizznvkKbE9PjW6UGYQ+2XApqp+Hn8tSC5I/gIDlBOOa -psJ4fkRjr8n5+CbHbGmQG736hZcZY/z10TtOQbxeeeuri6oDQ62D4Z07GpWCG2EG -sUakaytZnJkIN79PpfthPZwtStlG0KVs0i5wggH/iP2h0yAmvJ64ZRIqdvuE/aBn -sdfRRlYUqmFOJsVQgtUWGKGS4WIxrGaclzT1TNxCKdiAk0glXe3sDtvBni6qDW07 -iJzEXxrsLw6MiCDhHfDeae5JYeJXK0HlCfYHXgRmEnDFTGw8rBzwz3eXvPqZ5YNt -j+31uHSwQjgOgEgSrXeTmRfLZsytKqndhBB/yBFmzZNrswXGackCggEBAMN5RSdW -t+WWl8ghDGz/CN1oRjnk298/6L7ijluKGRgG+igwBEy+5m1EGPJT+Y5LEH4TiQJe -Oc2XjQuM7zABX7JWWk1cL8Zlv3kcmR0lg4BWs7wDkoU1HYRkMP57vubtxFzFOsNa -momivEniZ/eonHm3yv0VHeenH9j3mhJ3mVDIpkH+7uhn3++c0zYh96NkjfQi1/jF -P35eSAt7FgHDOt37fWXwtGeYFRN4P19ZUNiIvZwT6Q1gmegRO8BYoW6cSbLWe5Cp -abaULds46+mjM4zJhCZRFkdWHbzP4bZHocSmwGsqcpABJ6SASTVim02GGhBIt1nj -fkqa10X1c5Sqis0CggEBAPgxFKSHccfIJ6yht2HJjysRLN/IHlO9hDcpCWUrISN/ -hxu1uxfNGmUkd0H8zDO/O+QAJXLE8PPPB77pJniIJ8kK4swwsfufN6bNV9XJldjA -o4vXnYt9Mpuky9cugD8LocUgWQzzKY5Y875TC4s3ldzyKQVm0NO+Wz1U3gfjogEC -d7PhTk7Ba/ZjVGtL7HuZxlL+/TgZklMks2ulSTW2y8aqVJxaZXv0H0NX/+fpDHYw -iljr+iqbiqZvjrzySryb0XWMtzP9oyDEXTXrWnG+kOIZW3BZ9FLxT+Te7zZ2PUbK -vTkObsKxc8WVHIYgkt/OwWSwbYLre5nvFPvgEFbQuO8CggEAeZTlUXmbul63m5AK -xYS/w88G1x2lMK/0mT4bY4562zoDwJlVI1MdydqwVZGryDiiUnjeIC3xcBISdZu8 -bjR8jFUvp6xuPs2ska0bA0kBCQNkmc3zBY2rBVy4KKFZdRNwrm8yhK3HL1KcIKyF -FEK4yPBrfozy49JMecxP9aqUHu4eky/4828gl04JBUONXwC9VpuRj7dILdaAozt0 -zbXb2JSDQ7O60jCC83A4oprQMU6j+P9dVqe+Mtz9OD8ocb8eC/FiO/FTwm9aMl+u -RMzw1GHHI3oODGLg7j6y2oilcsZxKnblePJu8N+mKWFizY5aicRg3rUkKU00Ftx7 
-fn2xBQKCAQB7w7Xgie5SStyF+KrC58kuF8WB3oBJEAOjoiIeQhCnbAvK5KfkqZHV -CAc0b8TAtUc/XldOUSk6222oZQmbJ4J3fac1Xb8TlAUjd9iqMnk3+nBT5vSYP5mC -Bf7kUjr/tWQ5MfVWQNfjNTZvHWhvRwvDfzq3h9rxDEbhYbXKx1fdGwboO51aJpgY -6NWLH/RQepFsh91sIUxXi8CxGF5Wm84oRn4k7esXkdgZNAPX+N4O/guvZhV9M81D -S/QpAsYEIcuky8P7+Cplx6YXokKa4AXNyglQEHuG9PD7V7SAOxw5dhZAIpNXIThz -OfVcaVf0pVzJQjWKCLW9QHz9UXG0aScfAoIBACdr3exVMUaMOtrAnf2NXj3hecgg -WsWRBOOaSW5wXGt1JNlfYS4zwViafIwy31DNuMg22rj5Mq0TYMtuNYto5RoLSXeB -uupUrENEBnt7JFrwI/NyWG0uYMM3G2MtGHGYooaT9+++wT96QxJZr5fwFYF1ddf6 -5tFeKtNt5VM0wWBHO1voUhQ0TCaooatJjMuAB0+WbvwniKxmdbqQDzY+6myBBUVo -gBJ0JxhxakLm1XGFHDtPCsAAHX/uZ4CvH2uyWqAlx6iwGXd0wwEGrbIRB/BundxR -oaJWswU4FIPAgOpy2LEJKnvzhcmVFtZWD5sFXA1/83QvpceLTFTD5uioBPU= ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd deleted file mode 100644 index 4e55de816968..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/nginx/test.passwd +++ /dev/null @@ -1 +0,0 @@ -testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh deleted file mode 100644 index 87dfa6301924..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env bash -set -e -set -x - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -source helpers.bash - -# Root directory of Distribution -DISTRIBUTION_ROOT=$(cd ../..; pwd -P) - -volumeMount="" -if [ "$DOCKER_VOLUME" != "" ]; then - volumeMount="-v ${DOCKER_VOLUME}:/var/lib/docker" -fi - -dockerMount="" -if [ "$DOCKER_BINARY" != "" ]; then - dockerMount="-v ${DOCKER_BINARY}:/usr/local/bin/docker" -else - DOCKER_BINARY=docker -fi - -# Image containing the integration tests environment. -INTEGRATION_IMAGE=${INTEGRATION_IMAGE:-distribution/docker-integration} - -if [ "$1" == "-d" ]; then - start_daemon - shift -fi - -TESTS=${@:-.} - -# Make sure we upgrade the integration environment. -docker pull $INTEGRATION_IMAGE - -# Start a Docker engine inside a docker container -ID=$(docker run -d -it --privileged $volumeMount $dockerMount \ - -v ${DISTRIBUTION_ROOT}:/go/src/github.com/docker/distribution \ - -e "DOCKER_GRAPHDRIVER=$DOCKER_GRAPHDRIVER" \ - -e "EXEC_DRIVER=$EXEC_DRIVER" \ - ${INTEGRATION_IMAGE} \ - ./run_engine.sh) - -# Stop container on exit -trap "docker rm -f -v $ID" EXIT - -# Wait for it to become reachable. -tries=10 -until docker exec "$ID" docker version &> /dev/null; do - (( tries-- )) - if [ $tries -le 0 ]; then - echo >&2 "error: daemon failed to start" - exit 1 - fi - sleep 1 -done - -# If no volume is specified, transfer images into the container from -# the outer docker instance -if [ "$DOCKER_VOLUME" == "" ]; then - # Make sure we have images outside the container, to transfer to the container. - # Not much will happen here if the images are already present. - docker-compose pull - docker-compose build - - # Transfer images to the inner container. - for image in "$INTEGRATION_IMAGE" registry:0.9.1 dockerintegration_nginx dockerintegration_registryv2; do - docker save "$image" | docker exec -i "$ID" docker load - done -fi - -# Run the tests. 
-docker exec -it "$ID" sh -c "./test_runner.sh $TESTS" - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh deleted file mode 100644 index 4a7b94e02ee5..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_engine.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh -set -e -set -x - -DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-overlay} -EXEC_DRIVER=${EXEC_DRIVER:-native} - -# Set IP address in /etc/hosts for localregistry -IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}') -echo "$IP localregistry" >> /etc/hosts - -sh install_certs.sh localregistry - -docker --daemon --log-level=panic \ - --storage-driver="$DOCKER_GRAPHDRIVER" --exec-driver="$EXEC_DRIVER" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh deleted file mode 100644 index c6da03140c60..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/run_multiversion.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -# Run the integration tests with multiple versions of the Docker engine - -set -e -set -x - -source helpers.bash - -if [ `uname` = "Linux" ]; then - tmpdir_template="$TMPDIR/docker-versions.XXXXX" -else - # /tmp isn't available for mounting in boot2docker - tmpdir_template="`pwd`/../../../docker-versions.XXXXX" -fi - -tmpdir=`mktemp -d "$tmpdir_template"` -trap "rm -rf $tmpdir" EXIT - -if [ "$1" == "-d" ]; then - start_daemon -fi - -# Released versions - -versions="1.6.0 1.6.1 1.7.0 1.7.1" - -for v in $versions; do - echo "Extracting Docker $v from dind image" - binpath="$tmpdir/docker-$v/docker" - ID=$(docker create dockerswarm/dind:$v) - docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-$v" - - echo "Running tests with Docker $v" - DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" ./run.sh - - # Cleanup. - docker rm -f "$ID" -done - -# Latest experimental version - -echo "Extracting Docker master from dind image" -binpath="$tmpdir/docker-master/docker" -docker pull dockerswarm/dind-master -ID=$(docker create dockerswarm/dind-master) -docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-master" - -echo "Running tests with Docker master" -DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" ./run.sh - -# Cleanup. -docker rm -f "$ID" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh deleted file mode 100644 index 0a628238cdfc..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/test_runner.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -TESTS=${@:-.} - -function execute() { - >&2 echo "++ $@" - eval "$@" -} - -execute time docker-compose build - -execute docker-compose up -d - -# Run the tests. 
-execute time bats -p $TESTS diff --git a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats b/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats deleted file mode 100644 index 8b7ae287175d..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/contrib/docker-integration/tls.bats +++ /dev/null @@ -1,102 +0,0 @@ -# Registry host name, should be set to non-localhost address and match -# DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d -hostname="localregistry" - -image="hello-world:latest" - -# Login information, should match values in nginx/test.passwd -user="testuser" -password="passpassword" -email="distribution@docker.com" - -function setup() { - docker pull $image -} - -# skip basic auth tests with Docker 1.6, where they don't pass due to -# certificate issues -function basic_auth_version_check() { - run sh -c 'docker version | fgrep -q "Client version: 1.6."' - if [ "$status" -eq 0 ]; then - skip "Basic auth tests don't support 1.6.x" - fi -} - -# has_digest enforces the last output line is "Digest: sha256:..." -# the input is the name of the array containing the output lines -function has_digest() { - filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') - [ "$filtered" != "" ] -} - -function login() { - run docker login -u $user -p $password -e $email $1 - [ "$status" -eq 0 ] - # First line is WARNING about credential save - [ "${lines[1]}" = "Login Succeeded" ] -} - -@test "Test valid certificates" { - docker tag -f $image $hostname:5440/$image - run docker push $hostname:5440/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test basic auth" { - basic_auth_version_check - login $hostname:5441 - docker tag -f $image $hostname:5441/$image - run docker push $hostname:5441/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test TLS client auth" { - docker tag -f $image $hostname:5442/$image - run docker push $hostname:5442/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test TLS client with invalid certificate authority fails" { - docker tag -f $image $hostname:5443/$image - run docker push $hostname:5443/$image - [ "$status" -ne 0 ] -} - -@test "Test basic auth with TLS client auth" { - basic_auth_version_check - login $hostname:5444 - docker tag -f $image $hostname:5444/$image - run docker push $hostname:5444/$image - [ "$status" -eq 0 ] - has_digest "$output" -} - -@test "Test unknown certificate authority fails" { - docker tag -f $image $hostname:5445/$image - run docker push $hostname:5445/$image - [ "$status" -ne 0 ] -} - -@test "Test basic auth with unknown certificate authority fails" { - run login $hostname:5446 - [ "$status" -ne 0 ] - docker tag -f $image $hostname:5446/$image - run docker push $hostname:5446/$image - [ "$status" -ne 0 ] -} - -@test "Test TLS client auth to server with unknown certificate authority fails" { - docker tag -f $image $hostname:5447/$image - run docker push $hostname:5447/$image - [ "$status" -ne 0 ] -} - -@test "Test failure to connect to server fails to fallback to SSLv3" { - docker tag -f $image $hostname:5448/$image - run docker push $hostname:5448/$image - [ "$status" -ne 0 ] -} - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/coverpkg.sh b/Godeps/_workspace/src/github.com/docker/distribution/coverpkg.sh new file mode 100644 index 000000000000..25d419ae8265 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/coverpkg.sh @@ 
-0,0 +1,7 @@ +#!/usr/bin/env bash +# Given a subpackage and the containing package, figures out which packages +# need to be passed to `go test -coverpkg`: this includes all of the +# subpackage's dependencies within the containing package, as well as the +# subpackage itself. +DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" +echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go index ae581f159b26..31d821bba72d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest.go @@ -1,21 +1,14 @@ package digest import ( - "bytes" "fmt" "hash" "io" - "io/ioutil" "regexp" "strings" - - "github.com/docker/docker/pkg/tarsum" ) const ( - // DigestTarSumV1EmptyTar is the digest for the empty tar file. - DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - // DigestSha256EmptyTar is the canonical sha256 digest of empty data DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) @@ -29,18 +22,21 @@ const ( // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // -// More important for this code base, this type is compatible with tarsum -// digests. For example, the following would be a valid Digest: -// -// tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b -// // This allows to abstract the digest behind this type and work only in those // terms. type Digest string // NewDigest returns a Digest from alg and a hash.Hash object. func NewDigest(alg Algorithm, h hash.Hash) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, p)) } // NewDigestFromHex returns a Digest from alg and a the hex encoded digest. @@ -79,41 +75,15 @@ func FromReader(rd io.Reader) (Digest, error) { return Canonical.FromReader(rd) } -// FromTarArchive produces a tarsum digest from reader rd. -func FromTarArchive(rd io.Reader) (Digest, error) { - ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1) - if err != nil { - return "", err - } - - if _, err := io.Copy(ioutil.Discard, ts); err != nil { - return "", err - } - - d, err := ParseDigest(ts.Sum(nil)) - if err != nil { - return "", err - } - - return d, nil -} - // FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) (Digest, error) { - return FromReader(bytes.NewReader(p)) +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) } // Validate checks that the contents of d is a valid digest, returning an // error if not. 
func (d Digest) Validate() error { s := string(d) - // Common case will be tarsum - _, err := ParseTarSum(s) - if err == nil { - return nil - } - - // Continue on for general parser if !DigestRegexpAnchored.MatchString(s) { return ErrDigestInvalidFormat diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go index 94c6175f4db9..afb4ebf63242 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digest_test.go @@ -1,8 +1,6 @@ package digest import ( - "bytes" - "io" "testing" ) @@ -13,21 +11,6 @@ func TestParseDigest(t *testing.T) { algorithm Algorithm hex string }{ - { - input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "tarsum+sha256", - hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "tarsum.dev+sha256", - hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - algorithm: "tarsum.v1+sha256", - hex: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - }, { input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", algorithm: "sha256", @@ -97,25 +80,3 @@ func TestParseDigest(t *testing.T) { } } } - -// A few test cases used to fix behavior we expect in storage backend. - -func TestFromTarArchiveZeroLength(t *testing.T) { - checkTarsumDigest(t, "zero-length archive", bytes.NewReader([]byte{}), DigestTarSumV1EmptyTar) -} - -func TestFromTarArchiveEmptyTar(t *testing.T) { - // String of 1024 zeros is a valid, empty tar file. - checkTarsumDigest(t, "1024 zero bytes", bytes.NewReader(bytes.Repeat([]byte("\x00"), 1024)), DigestTarSumV1EmptyTar) -} - -func checkTarsumDigest(t *testing.T, msg string, rd io.Reader, expected Digest) { - dgst, err := FromTarArchive(rd) - if err != nil { - t.Fatalf("unexpected error digesting %s: %v", msg, err) - } - - if dgst != expected { - t.Fatalf("unexpected digest for %s: %q != %q", msg, dgst, expected) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go index 9a10539c6bcc..f3105a45b697 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/digester.go @@ -2,6 +2,7 @@ package digest import ( "crypto" + "fmt" "hash" "io" ) @@ -13,10 +14,9 @@ type Algorithm string // supported digest types const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding - SHA384 Algorithm = "sha384" // sha384 with hex encoding - SHA512 Algorithm = "sha512" // sha512 with hex encoding - TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only + SHA256 Algorithm = "sha256" // sha256 with hex encoding + SHA384 Algorithm = "sha384" // sha384 with hex encoding + SHA512 Algorithm = "sha512" // sha512 with hex encoding // Canonical is the primary digest algorithm used with the distribution // project. Other digests may be used but this one is the primary storage @@ -85,11 +85,18 @@ func (a Algorithm) New() Digester { } } -// Hash returns a new hash as used by the algorithm. 
If not available, nil is -// returned. Make sure to check Available before calling. +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. Check Algorithm.Available() before calling. func (a Algorithm) Hash() hash.Hash { if !a.Available() { - return nil + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). + panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) } return algorithms[a].New() @@ -106,6 +113,22 @@ func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { return digester.Digest(), nil } +// FromBytes digests the input and returns a Digest. +func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.New() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. + panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + // TODO(stevvooe): Allow resolution of verifiers using the digest type and // this registration system. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go index 278c50e011cd..f64b0db32b4c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/doc.go @@ -1,7 +1,7 @@ // Package digest provides a generalized type to opaquely represent message // digests and their operations within the registry. The Digest type is // designed to serve as a flexible identifier in a content-addressable system. -// More importantly, it provides tools and wrappers to work with tarsums and +// More importantly, it provides tools and wrappers to work with // hash.Hash-based digests with little effort. // // Basics @@ -16,17 +16,7 @@ // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // // In this case, the string "sha256" is the algorithm and the hex bytes are -// the "digest". A tarsum example will be more illustrative of the use case -// involved in the registry: -// -// tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b -// -// For this, we consider the algorithm to be "tarsum+sha256". Prudent -// applications will favor the ParseDigest function to verify the format over -// using simple type casts. However, a normal string can be cast as a digest -// with a simple type conversion: -// -// Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b") +// the "digest". 
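As a rough usage sketch of the digest API after this change (not part of the diff itself; the import path, `FromBytes`, `ParseDigest`, and `Algorithm` are taken from the hunks above, the payload and printed output are illustrative, and the blank `crypto/sha256` import follows the new `Algorithm.Hash` note about hash registration), computing and re-parsing a digest might look like:

```go
package main

import (
	_ "crypto/sha256" // the digest package does not link hash implementations itself
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("hello, registry") // hypothetical blob content

	// FromBytes now returns a Digest directly; the error return went away
	// along with the tarsum path.
	dgst := digest.FromBytes(payload)
	fmt.Println(dgst) // sha256:<hex>, using the Canonical algorithm

	// ParseDigest still validates the algorithm:hex form described above.
	parsed, err := digest.ParseDigest(dgst.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Algorithm())
}
```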
// // Because the Digest type is simply a string, once a valid Digest is // obtained, comparisons are cheap, quick and simple to express with the diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go index 3fac41b40fc7..4b9313c1aeac 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/set.go @@ -22,7 +22,7 @@ var ( // may be easily referenced by easily referenced by a string // representation of the digest as well as short representation. // The uniqueness of the short representation is based on other -// digests in the set. If digests are ommited from this set, +// digests in the set. If digests are omitted from this set, // collisions in a larger set may not be detected, therefore it // is important to always do short representation lookups on // the complete set of digests. To mitigate collisions, an diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum.go deleted file mode 100644 index 9effeb2e858d..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum.go +++ /dev/null @@ -1,70 +0,0 @@ -package digest - -import ( - "fmt" - - "regexp" -) - -// TarsumRegexp defines a regular expression to match tarsum identifiers. -var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+") - -// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with -// capture groups corresponding to each component. -var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)") - -// TarSumInfo contains information about a parsed tarsum. -type TarSumInfo struct { - // Version contains the version of the tarsum. - Version string - - // Algorithm contains the algorithm for the final digest - Algorithm string - - // Digest contains the hex-encoded digest. - Digest string -} - -// InvalidTarSumError provides informations about a TarSum that cannot be parsed -// by ParseTarSum. -type InvalidTarSumError string - -func (e InvalidTarSumError) Error() string { - return fmt.Sprintf("invalid tarsum: %q", string(e)) -} - -// ParseTarSum parses a tarsum string into its components of interest. For -// example, this method may receive the tarsum in the following format: -// -// tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e -// -// The function will return the following: -// -// TarSumInfo{ -// Version: "v1", -// Algorithm: "sha256", -// Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", -// } -// -func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) { - components := TarsumRegexpCapturing.FindStringSubmatch(tarSum) - - if len(components) != 1+TarsumRegexpCapturing.NumSubexp() { - return TarSumInfo{}, InvalidTarSumError(tarSum) - } - - return TarSumInfo{ - Version: components[3], - Algorithm: components[4], - Digest: components[5], - }, nil -} - -// String returns the valid, string representation of the tarsum info. 
-func (tsi TarSumInfo) String() string { - if tsi.Version == "" { - return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest) - } - - return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum_test.go deleted file mode 100644 index 894c25ab3181..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/tarsum_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package digest - -import ( - "reflect" - "testing" -) - -func TestParseTarSumComponents(t *testing.T) { - for _, testcase := range []struct { - input string - expected TarSumInfo - err error - }{ - { - input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - expected: TarSumInfo{ - Version: "v1", - Algorithm: "sha256", - Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - }, - }, - { - input: "", - err: InvalidTarSumError(""), - }, - { - input: "purejunk", - err: InvalidTarSumError("purejunk"), - }, - { - input: "tarsum.v23+test:12341234123412341effefefe", - expected: TarSumInfo{ - Version: "v23", - Algorithm: "test", - Digest: "12341234123412341effefefe", - }, - }, - - // The following test cases are ported from docker core - { - // Version 0 tarsum - input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - expected: TarSumInfo{ - Algorithm: "sha256", - Digest: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - }, - { - // Dev version tarsum - input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - expected: TarSumInfo{ - Version: "dev", - Algorithm: "sha256", - Digest: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - }, - } { - tsi, err := ParseTarSum(testcase.input) - if err != nil { - if testcase.err != nil && err == testcase.err { - continue // passes - } - - t.Fatalf("unexpected error parsing tarsum: %v", err) - } - - if testcase.err != nil { - t.Fatalf("expected error not encountered on %q: %v", testcase.input, testcase.err) - } - - if !reflect.DeepEqual(tsi, testcase.expected) { - t.Fatalf("expected tarsum info: %v != %v", tsi, testcase.expected) - } - - if testcase.input != tsi.String() { - t.Fatalf("input should equal output: %q != %q", tsi.String(), testcase.input) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go index f8c75b53de4c..9af3be1341ec 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers.go @@ -3,9 +3,6 @@ package digest import ( "hash" "io" - "io/ioutil" - - "github.com/docker/docker/pkg/tarsum" ) // Verifier presents a general verification interface to be used with message @@ -27,70 +24,10 @@ func NewDigestVerifier(d Digest) (Verifier, error) { return nil, err } - alg := d.Algorithm() - switch alg { - case "sha256", "sha384", "sha512": - return hashVerifier{ - hash: alg.Hash(), - digest: d, - }, nil - default: - // Assume we have a tarsum. - version, err := tarsum.GetVersionFromTarsum(string(d)) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - - // TODO(stevvooe): We may actually want to ban the earlier versions of - // tarsum. That decision may not be the place of the verifier. 
- - ts, err := tarsum.NewTarSum(pr, true, version) - if err != nil { - return nil, err - } - - // TODO(sday): Ick! A goroutine per digest verification? We'll have to - // get the tarsum library to export an io.Writer variant. - go func() { - if _, err := io.Copy(ioutil.Discard, ts); err != nil { - pr.CloseWithError(err) - } else { - pr.Close() - } - }() - - return &tarsumVerifier{ - digest: d, - ts: ts, - pr: pr, - pw: pw, - }, nil - } -} - -// NewLengthVerifier returns a verifier that returns true when the number of -// read bytes equals the expected parameter. -func NewLengthVerifier(expected int64) Verifier { - return &lengthVerifier{ - expected: expected, - } -} - -type lengthVerifier struct { - expected int64 // expected bytes read - len int64 // bytes read -} - -func (lv *lengthVerifier) Write(p []byte) (n int, err error) { - n = len(p) - lv.len += int64(n) - return n, err -} - -func (lv *lengthVerifier) Verified() bool { - return lv.expected == lv.len + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + }, nil } type hashVerifier struct { @@ -105,18 +42,3 @@ func (hv hashVerifier) Write(p []byte) (n int, err error) { func (hv hashVerifier) Verified() bool { return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) } - -type tarsumVerifier struct { - digest Digest - ts tarsum.TarSum - pr *io.PipeReader - pw *io.PipeWriter -} - -func (tv *tarsumVerifier) Write(p []byte) (n int, err error) { - return tv.pw.Write(p) -} - -func (tv *tarsumVerifier) Verified() bool { - return tv.digest == Digest(tv.ts.Sum(nil)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go index 5ee79f3472de..c342d6e7c84d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/digest/verifiers_test.go @@ -3,22 +3,14 @@ package digest import ( "bytes" "crypto/rand" - "encoding/base64" "io" - "os" - "strings" "testing" - - "github.com/docker/distribution/testutil" ) func TestDigestVerifier(t *testing.T) { p := make([]byte, 1<<20) rand.Read(p) - digest, err := FromBytes(p) - if err != nil { - t.Fatalf("unexpected error digesting bytes: %#v", err) - } + digest := FromBytes(p) verifier, err := NewDigestVerifier(digest) if err != nil { @@ -30,43 +22,6 @@ func TestDigestVerifier(t *testing.T) { if !verifier.Verified() { t.Fatalf("bytes not verified") } - - tf, tarSum, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating tarfile: %v", err) - } - - digest, err = FromTarArchive(tf) - if err != nil { - t.Fatalf("error digesting tarsum: %v", err) - } - - if digest.String() != tarSum { - t.Fatalf("unexpected digest: %q != %q", digest.String(), tarSum) - } - - expectedSize, _ := tf.Seek(0, os.SEEK_END) // Get tar file size - tf.Seek(0, os.SEEK_SET) // seek back - - // This is the most relevant example for the registry application. It's - // effectively a read through pipeline, where the final sink is the digest - // verifier. 
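The deleted comment above describes the read-through pattern that survives this change with the plain hash-backed verifier: the consumer reads the blob while the same bytes are fed into the verifier, and `Verified` is checked once the copy completes. A minimal sketch of that pattern against the updated API (assuming the same import path; the payload and discard sink are illustrative) might look like:

```go
package main

import (
	"bytes"
	_ "crypto/sha256" // register sha256 so the verifier's hash is available
	"fmt"
	"io"
	"io/ioutil"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := bytes.Repeat([]byte{'a'}, 1024) // stand-in for blob content

	// Pre-computed digest the incoming data must match.
	expected := digest.FromBytes(payload)

	// NewDigestVerifier now always returns a hash-backed verifier; the
	// tarsum and length-verifier paths removed in this diff are gone.
	verifier, err := digest.NewDigestVerifier(expected)
	if err != nil {
		panic(err)
	}

	// Read-through pipeline: tee the stream into the verifier while the
	// "real" consumer reads it, then check the result at the end.
	rd := io.TeeReader(bytes.NewReader(payload), verifier)
	if _, err := io.Copy(ioutil.Discard, rd); err != nil {
		panic(err)
	}

	if !verifier.Verified() {
		fmt.Println("digest mismatch")
		return
	}
	fmt.Println("verified", expected)
}
```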
- verifier, err = NewDigestVerifier(digest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - lengthVerifier := NewLengthVerifier(expectedSize) - rd := io.TeeReader(tf, lengthVerifier) - io.Copy(verifier, rd) - - if !lengthVerifier.Verified() { - t.Fatalf("verifier detected incorrect length") - } - - if !verifier.Verified() { - t.Fatalf("bytes not verified") - } } // TestVerifierUnsupportedDigest ensures that unsupported digest validation is @@ -84,79 +39,11 @@ func TestVerifierUnsupportedDigest(t *testing.T) { } } -// TestJunkNoDeadlock ensures that junk input into a digest verifier properly -// returns errors from the tarsum library. Specifically, we pass in a file -// with a "bad header" and should see the error from the io.Copy to verifier. -// This has been seen with gzipped tarfiles, mishandled by the tarsum package, -// but also on junk input, such as html. -func TestJunkNoDeadlock(t *testing.T) { - expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473") - junk := bytes.Repeat([]byte{'a'}, 1024) - - verifier, err := NewDigestVerifier(expected) - if err != nil { - t.Fatalf("unexpected error creating verifier: %v", err) - } - - rd := bytes.NewReader(junk) - if _, err := io.Copy(verifier, rd); err == nil { - t.Fatalf("unexpected error verifying input data: %v", err) - } -} - -// TestBadTarNoDeadlock runs a tar with a "bad" tar header through digest -// verifier, ensuring that the verifier returns an error properly. -func TestBadTarNoDeadlock(t *testing.T) { - // TODO(stevvooe): This test is exposing a bug in tarsum where if we pass - // a gzipped tar file into tarsum, the library returns an error. This - // should actually work. When the tarsum package is fixed, this test will - // fail and we can remove this test or invert it. - - // This tarfile was causing deadlocks in verifiers due mishandled copy error. - // This is a gzipped tar, which we typically don't see but should handle. 
- // - // From https://registry-1.docker.io/v2/library/ubuntu/blobs/tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473 - const badTar = ` -H4sIAAAJbogA/0otSdZnoDEwMDAxMDc1BdJggE6D2YZGJobGBmbGRsZAdYYGBkZGDAqmtHYYCJQW -lyQWAZ1CqTnonhsiAAAAAP//AsV/YkEJTdMAGfFvZmA2Gv/0AAAAAAD//4LFf3F+aVFyarFeTmZx -CbXtAOVnMxMTXPFvbGpmjhb/xobmwPinSyCO8PgHAAAA///EVU9v2z4MvedTEMihl9a5/26/YTkU -yNKiTTDsKMt0rE0WDYmK628/ym7+bFmH2DksQACbIB/5+J7kObwiQsXc/LdYVGibLObRccw01Qv5 -19EZ7hbbZudVgWtiDFCSh4paYII4xOVxNgeHLXrYow+GXAAqgSuEQhzlTR5ZgtlsVmB+aKe8rswe -zzsOjwtoPGoTEGplHHhMCJqxSNUPwesbEGbzOXxR34VCHndQmjfhUKhEq/FURI0FqJKFR5q9NE5Z -qbaoBGoglAB+5TSK0sOh3c3UPkRKE25dEg8dDzzIWmqN2wG3BNY4qRL1VFFAoJJb5SXHU90n34nk -SUS8S0AeGwqGyXdZel1nn7KLGhPO0kDeluvN48ty9Q2269ft8/PTy2b5GfKuh9/2LBIWo6oz+N8G -uodmWLETg0mW4lMP4XYYCL4+rlawftpIO40SA+W6Yci9wRZE1MNOjmyGdhBQRy9OHpqOdOGh/wT7 -nZdOkHZ650uIK+WrVZdkgErJfnNEJysLnI5FSAj4xuiCQNpOIoNWmhyLByVHxEpLf3dkr+k9KMsV -xV0FhiVB21hgD3V5XwSqRdOmsUYr7oNtZXTVzyTHc2/kqokBy2ihRMVRTN+78goP5Ur/aMhz+KOJ -3h2UsK43kdwDo0Q9jfD7ie2RRur7MdpIrx1Z3X4j/Q1qCswN9r/EGCvXiUy0fI4xeSknnH/92T/+ -fgIAAP//GkWjYBSMXAAIAAD//2zZtzAAEgAA` - expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473") - - verifier, err := NewDigestVerifier(expected) - if err != nil { - t.Fatalf("unexpected error creating verifier: %v", err) - } - - rd := base64.NewDecoder(base64.StdEncoding, strings.NewReader(badTar)) - - if _, err := io.Copy(verifier, rd); err == nil { - t.Fatalf("unexpected error verifying input data: %v", err) - } - - if verifier.Verified() { - // For now, we expect an error, since tarsum library cannot handle - // compressed tars (!!!). - t.Fatalf("no error received after invalid tar") - } -} - // TODO(stevvooe): Add benchmarks to measure bytes/second throughput for -// DigestVerifier. We should be tarsum/gzip limited for common cases but we -// want to verify this. +// DigestVerifier. // -// The relevant benchmarks for comparison can be run with the following +// The relevant benchmark for comparison can be run with the following // commands: // // go test -bench . crypto/sha1 -// go test -bench . github.com/docker/docker/pkg/tarsum // diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile deleted file mode 100644 index 89ee3d29cca0..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM docs/base:latest -MAINTAINER Mary Anthony (@moxiegirl) - -RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine -RUN svn checkout https://github.com/docker/compose/trunk/docs /docs/content/compose -RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine -RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry -RUN svn checkout https://github.com/kitematic/kitematic/trunk/docs /docs/content/kitematic -RUN svn checkout https://github.com/docker/tutorials/trunk/docs /docs/content/tutorials -RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/opensource - -ENV PROJECT=registry - -# To get the git info for this repo -COPY . /src - -COPY . 
/docs/content/$PROJECT/ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile deleted file mode 100644 index 021e8f6e5eaf..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/Makefile +++ /dev/null @@ -1,55 +0,0 @@ -.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate - -# env vars passed through directly to Docker's build scripts -# to allow things like `make DOCKER_CLIENTONLY=1 binary` easily -# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these -DOCKER_ENVS := \ - -e BUILDFLAGS \ - -e DOCKER_CLIENTONLY \ - -e DOCKER_EXECDRIVER \ - -e DOCKER_GRAPHDRIVER \ - -e TESTDIRS \ - -e TESTFLAGS \ - -e TIMEOUT -# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds - -# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) -DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) - -# to allow `make DOCSPORT=9000 docs` -DOCSPORT := 8000 - -# Get the IP ADDRESS -DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") -HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") -HUGO_BIND_IP=0.0.0.0 - -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) - - -DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE - -# for some docs workarounds (see below in "docs-build" target) -GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) - -default: docs - -docs: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) - -docs-draft: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) - - -docs-shell: docs-build - $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash - - -docs-build: -# ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files -# echo "$(GIT_BRANCH)" > GIT_BRANCH -# echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET -# echo "$(GITCOMMIT)" > GITCOMMIT - docker build -t "$(DOCKER_DOCS_IMAGE)" . diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md deleted file mode 100644 index ae33910c015c..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/apache.md +++ /dev/null @@ -1,213 +0,0 @@ - - -# Authenticating proxy with apache - -## Use-case - -People already relying on an apache proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline. - -Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal. 
- -### Alternatives - -If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth). - -### Solution - -With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry. - -While we use a simple htpasswd file as an example, any other apache authentication backend should be fairly easy to implement once you are done with the exemple. - -We also implement push restriction (to a limited user group) for the sake of the exemple. Again, you should modify this to fit your mileage. - -### Gotchas - -While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. - -Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. - -## Setting things up - -Read again [the requirements](recipes.md#requirements). - -Ready? - -Run the following script: - -``` -mkdir -p auth -mkdir -p data - -# This is the main apache configuration you will use -cat < auth/httpd.conf -LoadModule headers_module modules/mod_headers.so - -LoadModule authn_file_module modules/mod_authn_file.so -LoadModule authn_core_module modules/mod_authn_core.so -LoadModule authz_groupfile_module modules/mod_authz_groupfile.so -LoadModule authz_user_module modules/mod_authz_user.so -LoadModule authz_core_module modules/mod_authz_core.so -LoadModule auth_basic_module modules/mod_auth_basic.so -LoadModule access_compat_module modules/mod_access_compat.so - -LoadModule log_config_module modules/mod_log_config.so - -LoadModule ssl_module modules/mod_ssl.so - -LoadModule proxy_module modules/mod_proxy.so -LoadModule proxy_http_module modules/mod_proxy_http.so - -LoadModule unixd_module modules/mod_unixd.so - - - SSLRandomSeed startup builtin - SSLRandomSeed connect builtin - - - - User daemon - Group daemon - - -ServerAdmin you@example.com - -ErrorLog /proc/self/fd/2 - -LogLevel warn - - - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined - LogFormat "%h %l %u %t \"%r\" %>s %b" common - - - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio - - - CustomLog /proc/self/fd/1 common - - -ServerRoot "/usr/local/apache2" - -Listen 5043 - - - AllowOverride none - Require all denied - - - - - ServerName myregistrydomain.com - - SSLEngine on - SSLCertificateFile /usr/local/apache2/conf/domain.crt - SSLCertificateKeyFile /usr/local/apache2/conf/domain.key - - ## SSL settings recommandation from: https://raymii.org/s/tutorials/Strong_SSL_Security_On_Apache2.html - # Anti CRIME - SSLCompression off - - # POODLE and other stuff - SSLProtocol all -SSLv2 -SSLv3 -TLSv1 - - # Secure cypher suites - SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH - SSLHonorCipherOrder on - - Header always set "Docker-Distribution-Api-Version" "registry/2.0" - Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" - RequestHeader set X-Forwarded-Proto "https" - - ProxyRequests off - ProxyPreserveHost on - - # no proxy for /error/ (Apache HTTPd errors messages) - ProxyPass /error/ ! 
- - ProxyPass /v2 http://registry:5000/v2 - ProxyPassReverse /v2 http://registry:5000/v2 - - - Order deny,allow - Allow from all - AuthName "Registry Authentication" - AuthType basic - AuthUserFile "/usr/local/apache2/conf/httpd.htpasswd" - AuthGroupFile "/usr/local/apache2/conf/httpd.groups" - - # Read access to authentified users - - Require valid-user - - - # Write access to docker-deployer only - - Require group pusher - - - - - -EOF - -# Now, create a password file for "testuser" and "testpassword" -docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/httpd.htpasswd -# Create another one for "testuserpush" and "testpasswordpush" -docker run --entrypoint htpasswd httpd:2.4 -Bbn testuserpush testpasswordpush >> auth/httpd.htpasswd - -# Create your group file -echo "pusher: testuserpush" > auth/httpd.groups - -# Copy over your certificate files -cp domain.crt auth -cp domain.key auth - -# Now create your compose file - -cat < docker-compose.yml -apache: - image: "httpd:2.4" - hostname: myregistrydomain.com - ports: - - 5043:5043 - links: - - registry:registry - volumes: - - `pwd`/auth:/usr/local/apache2/conf - -registry: - image: registry:2 - ports: - - 127.0.0.1:5000:5000 - volumes: - - `pwd`/data:/var/lib/registry - -EOF -``` - -## Starting and stopping - -Now, start your stack: - - docker-compose up -d - -Login with a "push" authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image: - - docker login myregistrydomain.com:5043 - docker tag ubuntu myregistrydomain.com:5043/test - docker push myregistrydomain.com:5043/test - -Now, login with a "pull-only" user (using `testuser` and `testpassword`), then pull back the image: - - docker login myregistrydomain.com:5043 - docker pull myregistrydomain.com:5043/test - -Verify that the "pull-only" can NOT push: - - docker push myregistrydomain.com:5043/test diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md deleted file mode 100644 index 39251760878b..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/architecture.md +++ /dev/null @@ -1,54 +0,0 @@ - - -# Architecture - -## Design -**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios. - -### Eventual Consistency - -> **NOTE:** This section belongs somewhere, perhaps in a design document. We -> are leaving this here so the information is not lost. - -Running the registry on eventually consistent backends has been part of the -design from the beginning. This section covers some of the approaches to -dealing with this reality. - -There are a few classes of issues that we need to worry about when -implementing something on top of the storage drivers: - -1. Read-After-Write consistency (see this [article on - s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)). -2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict). - -In reality, the registry must worry about these kinds of errors when doing the -following: - -1. Accepting data into a temporary upload file may not have latest data block - yet (read-after-write). -2. Moving uploaded data into its blob location (write-write race). -3. Modifying the "current" manifest for given tag (write-write race). -4. A whole slew of operations around deletes (read-after-write, delete-write - races, garbage collection, etc.). 
- -The backend path layout employs a few techniques to avoid these problems: - -1. Large writes are done to private upload directories. This alleviates most - of the corruption potential under multiple writers by avoiding multiple - writers. -2. Constraints in storage driver implementations, such as support for writing - after the end of a file to extend it. -3. Digest verification to avoid data corruption. -4. Manifest files are stored by digest and cannot change. -5. All other non-content files (links, hashes, etc.) are written as an atomic - unit. Anything that requires additions and deletions is broken out into - separate "files". Last writer still wins. - -Unfortunately, one must play this game when trying to build something like -this on top of eventually consistent storage systems. If we run into serious -problems, we can wrap the storagedrivers in a shared consistency layer but -that would increase complexity and hinder registry cluster performance. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md deleted file mode 100644 index 30e1b5d9332f..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/building.md +++ /dev/null @@ -1,158 +0,0 @@ - - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. 
- -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/tools/godep github.com/golang/lint/golint - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ GOPATH=`godep path`:$GOPATH make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/Sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry -version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Developing - -The above approaches are helpful for small experimentation. If more complex -tasks are at hand, it is recommended to employ the full power of `godep`. - -The Makefile is designed to have its `GOPATH` defined externally. This allows -one to experiment with various development environment setups. This is -primarily useful when testing upstream bugfixes, by modifying local code. This -can be demonstrated using `godep` to migrate the `GOPATH` to use the specified -dependencies. The `GOPATH` can be migrated to the current package versions -declared in `Godeps` with the following command: - - godep restore - -> **WARNING:** This command will checkout versions of the code specified in -> Godeps/Godeps.json, modifying the contents of `GOPATH`. 
If this is -> undesired, it is recommended to create a workspace devoted to work on the -> _Distribution_ project. - -With a successful run of the above command, one can now use `make` without -specifying the `GOPATH`: - - make - -If that is successful, standard `go` commands, such as `go test` should work, -per package, without issue. - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. - -To enable the [Ceph RADOS storage driver](storage-drivers/rados.md) -(librados-dev and librbd-dev will be required to build the bindings): - - export DOCKER_BUILDTAGS='include_rados' diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md deleted file mode 100644 index 129f2e75af8f..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/configuration.md +++ /dev/null @@ -1,2166 +0,0 @@ - - -# Registry Configuration Reference - -The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production. - -## Override specific configuration options - -In a typical setup where you run your Registry from the official image, you can specify a configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction. - -To override a configuration option, create an environment variable named -`REGISTRY_variable` where *`variable`* is the name of the configuration option -and the `_` (underscore) represents indention levels. For example, you can -configure the `rootdirectory` of the `filesystem` storage backend: - - storage: - filesystem: - rootdirectory: /var/lib/registry - -To override this value, set an environment variable like this: - - REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere - -This variable overrides the `/var/lib/registry` value to the `/somewhere` -directory. - ->**NOTE**: It is highly recommended to create a base configuration file with which environment variables can be used to tweak individual values. Overriding configuration sections with environment variables is not recommended. - -## Overriding the entire configuration file - -If the default configuration is not a sound basis for your usage, or if you are having issues overriding keys from the environment, you can specify an alternate YAML configuration file by mounting it as a volume in the container. - -Typically, create a new configuration file from scratch, and call it `config.yml`, then: - - docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/config.yml:/etc/docker/registry/config.yml \ - registry:2 - -You can (and probably should) use [this as a starting point](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). - -## List of configuration options - -This section lists all the registry configuration options. Some options in -the list are mutually exclusive. So, make sure to read the detailed reference -information about each option that appears later in this page. 
- - version: 0.1 - log: - level: debug - formatter: text - fields: - service: registry - environment: staging - hooks: - - type: mail - disabled: true - levels: - - panic - options: - smtp: - addr: mail.example.com:25 - username: mailuser - password: password - insecure: true - from: sender@example.com - to: - - errors@example.com - loglevel: debug # deprecated: use "log" - storage: - filesystem: - rootdirectory: /var/lib/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - gcs: - bucket: bucketname - keyfile: /path/to/keyfile - rootdirectory: /gcs/object/name/prefix - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - bucket: bucketname - encrypt: true - secure: true - v4auth: true - chunksize: 5242880 - rootdirectory: /s3/object/name/prefix - rados: - poolname: radospool - username: radosuser - chunksize: 4194304 - swift: - username: username - password: password - authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth - tenant: tenantname - tenantid: tenantid - domain: domain name for Openstack Identity v3 API - domainid: domain id for Openstack Identity v3 API - insecureskipverify: true - region: fr - container: containername - rootdirectory: /swift/object/name/prefix - delete: - enabled: false - redirect: - disable: false - cache: - blobdescriptor: redis - maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false - readonly: - enabled: false - auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle - htpasswd: - realm: basic-realm - path: /path/to/htpasswd - middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000 - reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true - http: - addr: localhost:5000 - prefix: /my/nested/registry/ - host: https://myregistryaddress.org:5000 - secret: asecretforlocaldevelopment - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] - notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 500 - threshold: 5 - backoff: 1000 - redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 - file: - - file: /path/to/checked/file - interval: 10s - http: - - uri: http://server.to.check/must/return/200 - headers: - Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] - statuscode: 200 - timeout: 3s - interval: 10s - threshold: 3 - tcp: - - addr: redis-server.domain.com:6379 - timeout: 3s - interval: 10s - threshold: 3 - proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] - -In some instances a configuration 
option is **optional** but it contains child -options marked as **required**. This indicates that you can omit the parent with -all its children. However, if the parent is included, you must also include all -the children marked **required**. - -## version - - version: 0.1 - -The `version` option is **required**. It specifies the configuration's version. -It is expected to remain a top-level field, to allow for a consistent version -check before parsing the remainder of the configuration file. - -## log - -The `log` subsection configures the behavior of the logging system. The logging -system outputs everything to stdout. You can adjust the granularity and format -with this configuration section. - - log: - level: debug - formatter: text - fields: - service: registry - environment: staging - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. |
| `formatter` | no | Selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json` or `logstash`. The default is `text`. |
| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying the source of log messages after they are mixed into other systems. |
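For instance, a registry whose logs are shipped to an external aggregator might keep the same fields but switch to the JSON formatter; a minimal sketch, with illustrative (not default) values:

    log:
      level: info
      formatter: json
      fields:
        service: registry
        environment: production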
- -## hooks - - hooks: - - type: mail - levels: - - panic - options: - smtp: - addr: smtp.sendhost.com:25 - username: sendername - password: password - insecure: true - from: name@sendhost.com - to: - - name@receivehost.com - -The `hooks` subsection configures the logging hooks' behavior. This subsection -includes a sequence handler which you can use for sending mail, for example. -Refer to `loglevel` to configure the level of messages printed. - -## loglevel - -> **DEPRECATED:** Please use [log](#log) instead. - - loglevel: debug - -Permitted values are `error`, `warn`, `info` and `debug`. The default is -`info`. - -## storage - - storage: - filesystem: - rootdirectory: /var/lib/registry - azure: - accountname: accountname - accountkey: base64encodedaccountkey - container: containername - gcs: - bucket: bucketname - keyfile: /path/to/keyfile - rootdirectory: /gcs/object/name/prefix - s3: - accesskey: awsaccesskey - secretkey: awssecretkey - region: us-west-1 - bucket: bucketname - encrypt: true - secure: true - v4auth: true - chunksize: 5242880 - rootdirectory: /s3/object/name/prefix - rados: - poolname: radospool - username: radosuser - chunksize: 4194304 - swift: - username: username - password: password - authurl: https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth - tenant: tenantname - tenantid: tenantid - domain: domain name for Openstack Identity v3 API - domainid: domain id for Openstack Identity v3 API - insecureskipverify: true - region: fr - container: containername - rootdirectory: /swift/object/name/prefix - delete: - enabled: false - cache: - blobdescriptor: inmemory - maintenance: - uploadpurging: - enabled: true - age: 168h - interval: 24h - dryrun: false - redirect: - disable: false - -The storage option is **required** and defines which storage backend is in use. -You must configure one backend; if you configure more, the registry returns an error. - -If you are deploying a registry on Windows, be aware that a Windows volume mounted from the host is not recommended. Instead, you can use a S3, or Azure, backing data-store. If you do use a Windows volume, you must ensure that the `PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255 characters). Failure to do so can result in the following error message: - - mkdir /XXX protocol error and your registry will not function properly. - -### delete - -Use the `delete` subsection to enable the deletion of image blobs and manifests -by digest. It defaults to false, but it can be enabled by writing the following -on the configuration file: - - delete: - enabled: true - -### cache - -Use the `cache` subsection to enable caching of data accessed in the storage -backend. Currently, the only available cache provides fast access to layer -metadata. This, if configured, uses the `blobdescriptor` field. - -You can set `blobdescriptor` field to `redis` or `inmemory`. The `redis` value uses -a Redis pool to cache layer metadata. The `inmemory` value uses an in memory -map. - ->**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these ->are equivalent, `layerinfo` has been deprecated, in favor or ->`blobdescriptor`. - -### redirect - -The `redirect` subsection provides configuration for managing redirects from -content backends. For backends that support it, redirecting is enabled by -default. Certain deployment scenarios may prefer to route all data through the -Registry, rather than redirecting to the backend. 
This may be more efficient -when using a backend that is not colocated or when a registry instance is -doing aggressive caching. - -Redirects can be disabled by adding a single flag `disable`, set to `true` -under the `redirect` section: - - redirect: - disable: true - -### filesystem - -The `filesystem` storage backend uses the local disk to store registry files. It -is ideal for development and may be appropriate for some small-scale production -applications. - -This backend has a single, required `rootdirectory` parameter. The parameter -specifies the absolute path to a directory. The registry stores all its data -here so make sure there is adequate space available. - -### azure - -This storage backend uses Microsoft's Azure Blob Storage. - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `accountname` | yes | Azure account name. |
| `accountkey` | yes | Azure account key. |
| `container` | yes | Name of the Azure container into which to store data. |
| `realm` | no | Domain name suffix for the Storage Service API endpoint. By default, this is `core.windows.net`. |
- -### gcs - -This storage backend uses Google Cloud Storage. - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `bucket` | yes | Storage bucket name. |
| `keyfile` | no | A private service account key file in JSON format. Instead of a key file, Google Application Default Credentials can be used. |
| `rootdirectory` | no | A prefix that is applied to all Google Cloud Storage keys, to allow you to segment data in your bucket if necessary. |
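If the registry runs in an environment that already provides Google Application Default Credentials, the `keyfile` can simply be omitted; a minimal sketch in which the bucket name is a placeholder:

    storage:
      gcs:
        bucket: bucketname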
- -### rados - -This storage backend uses [Ceph Object Storage](http://ceph.com/docs/master/rados/). - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `poolname` | yes | Ceph pool name. |
| `username` | no | Ceph cluster user to connect as (i.e. `admin`, not `client.admin`). |
| `chunksize` | no | Size of the written RADOS objects. Default value is 4MB (4194304). |
- - -### S3 - -This storage backend uses Amazon's Simple Storage Service (S3). - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `accesskey` | yes | Your AWS Access Key. |
| `secretkey` | yes | Your AWS Secret Key. |
| `region` | yes | The AWS region in which your bucket exists. For the moment, the Go AWS library in use does not use the newer DNS based bucket routing. |
| `bucket` | yes | The bucket name in which you want to store the registry's data. |
| `encrypt` | no | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is `false`. |
| `secure` | no | Indicates whether to use HTTPS instead of HTTP. A boolean value. The default is `true`. |
| `v4auth` | no | Indicates whether the registry uses Version 4 of AWS's authentication. Generally, you should set this to `true`. By default, this is `false`. |
| `chunksize` | no | The S3 API requires multipart upload chunks to be at least 5MB. This value should be a number that is larger than 5*1024*1024. |
| `rootdirectory` | no | A prefix that is applied to all S3 keys, to allow you to segment data in your bucket if necessary. |
### Maintenance

Currently upload purging and read-only mode are the only maintenance functions available. These and future maintenance functions which are related to storage can be configured under the maintenance section.

### Upload Purging

Upload purging is a background process that periodically removes orphaned files from the upload directories of the registry. Upload purging is enabled by default. To configure upload directory purging, the following parameters must be set.

| Parameter | Required | Description |
|-----------|----------|-------------|
| `enabled` | yes | Set to true to enable upload purging. Default=true. |
| `age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week). |
| `interval` | yes | The interval between upload directory purging. Default=24h. |
| `dryrun` | yes | dryrun can be set to true to obtain a summary of what directories will be deleted. Default=false. |

Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week).

### Read-only mode

If the `readonly` section under `maintenance` has `enabled` set to `true`, clients will not be allowed to write to the registry. This mode is useful to temporarily prevent writes to the backend storage so a garbage collection pass can be run. Before running garbage collection, the registry should be restarted with readonly's `enabled` set to true. After the garbage collection pass finishes, the registry may be restarted again, this time with `readonly` removed from the configuration (or set to false).

### Openstack Swift

This storage backend uses Openstack Swift object storage.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `authurl` | yes | URL for obtaining an auth token: `https://storage.myprovider.com/v2.0` or `https://storage.myprovider.com/v3/auth`. |
| `username` | yes | Your Openstack user name. |
| `password` | yes | Your Openstack password. |
| `region` | no | The Openstack region in which your container exists. |
| `container` | yes | The container name in which you want to store the registry's data. |
| `tenant` | no | Your Openstack tenant name. |
| `tenantid` | no | Your Openstack tenant id. |
| `domain` | no | Your Openstack domain name for the Identity v3 API. |
| `domainid` | no | Your Openstack domain id for the Identity v3 API. |
| `trustid` | no | Your Openstack trust id for the Identity v3 API. |
| `insecureskipverify` | no | `true` to skip TLS verification; `false` by default. |
| `chunksize` | no | Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). |
| `rootdirectory` | no | A prefix that is applied to all Swift keys, to allow you to segment data in your container if necessary. |
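When authenticating against the Identity v3 API, the v3-specific parameters from the table combine roughly as follows; a minimal sketch in which the endpoint, credentials, domain and container names are placeholders:

    storage:
      swift:
        authurl: https://storage.myprovider.com/v3/auth
        username: username
        password: password
        domain: domainname
        container: containername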
- - -## auth - - auth: - silly: - realm: silly-realm - service: silly-service - token: - realm: token-realm - service: token-service - issuer: registry-token-issuer - rootcertbundle: /root/certs/bundle - htpasswd: - realm: basic-realm - path: /path/to/htpasswd - -The `auth` option is **optional**. There are -currently 3 possible auth providers, `silly`, `token` and `htpasswd`. You can configure only -one `auth` provider. - -### silly - -The `silly` auth is only for development purposes. It simply checks for the -existence of the `Authorization` header in the HTTP request. It has no regard for -the header's value. If the header does not exist, the `silly` auth responds with a -challenge response, echoing back the realm, service, and scope that access was -denied for. - -The following values are used to configure the response: - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
- - - -### token - -Token based authentication allows the authentication system to be decoupled from -the registry. It is a well established authentication paradigm with a high -degree of security. - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token, so it must match the value configured for the issuer. |
| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. |
For more information about Token based authentication configuration, see the [specification](spec/auth/token.md).

### htpasswd

The _htpasswd_ authentication backend allows one to configure basic auth using an [Apache HTPasswd File](https://httpd.apache.org/docs/2.4/programs/htpasswd.html). Only [`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are supported. Entries with other hash types will be ignored. The htpasswd file is loaded once, at startup. If the file is invalid, the registry will display an error and will not start.

> __WARNING:__ This authentication scheme should only be used with TLS configured, since basic authentication sends passwords as part of the http header.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `path` | yes | Path to htpasswd file to load at startup. |
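Since the warning above ties basic authentication to TLS, a minimal sketch of an `auth` section paired with the corresponding `http.tls` settings might look like the following; the realm and paths mirror the examples used elsewhere in this document and are placeholders:

    auth:
      htpasswd:
        realm: basic-realm
        path: /path/to/htpasswd
    http:
      tls:
        certificate: /path/to/x509/public
        key: /path/to/x509/private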
- -## middleware - -The `middleware` option is **optional**. Use this option to inject middleware at -named hook points. All middlewares must implement the same interface as the -object they're wrapping. This means a registry middleware must implement the -`distribution.Namespace` interface, repository middleware must implement -`distribution.Repository`, and storage middleware must implement -`driver.StorageDriver`. - -Currently only one middleware, `cloudfront`, a storage middleware, is supported -in the registry implementation. - - middleware: - registry: - - name: ARegistryMiddleware - options: - foo: bar - repository: - - name: ARepositoryMiddleware - options: - foo: bar - storage: - - name: cloudfront - options: - baseurl: https://my.cloudfronted.domain.com/ - privatekey: /path/to/pem - keypairid: cloudfrontkeypairid - duration: 3000 - -Each middleware entry has `name` and `options` entries. The `name` must -correspond to the name under which the middleware registers itself. The -`options` field is a map that details custom configuration required to -initialize the middleware. It is treated as a `map[string]interface{}`. As such, -it supports any interesting structures desired, leaving it up to the middleware -initialization function to best determine how to handle the specific -interpretation of the options. - -### cloudfront - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `baseurl` | yes | `SCHEME://HOST[/PATH]` at which Cloudfront is served. |
| `privatekey` | yes | Private Key for Cloudfront provided by AWS. |
| `keypairid` | yes | Key pair ID provided by AWS. |
| `duration` | no | Duration for which a signed URL should be valid. |
- - -## reporting - - reporting: - bugsnag: - apikey: bugsnagapikey - releasestage: bugsnagreleasestage - endpoint: bugsnagendpoint - newrelic: - licensekey: newreliclicensekey - name: newrelicname - verbose: true - -The `reporting` option is **optional** and configures error and metrics -reporting tools. At the moment only two services are supported, [New -Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com), a valid -configuration may contain both. - -### bugsnag - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `apikey` | yes | API Key provided by Bugsnag. |
| `releasestage` | no | Tracks where the registry is deployed, for example, `production`, `staging`, or `development`. |
| `endpoint` | no | Specify the enterprise Bugsnag endpoint. |
- - -### newrelic - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `licensekey` | yes | License key provided by New Relic. |
| `name` | no | New Relic application name. |
| `verbose` | no | Enable New Relic debugging output on stdout. |
- -## http - - http: - addr: localhost:5000 - net: tcp - prefix: /my/nested/registry/ - host: https://myregistryaddress.org:5000 - secret: asecretforlocaldevelopment - tls: - certificate: /path/to/x509/public - key: /path/to/x509/private - clientcas: - - /path/to/ca.pem - - /path/to/another/ca.pem - debug: - addr: localhost:5001 - headers: - X-Content-Type-Options: [nosniff] - -The `http` option details the configuration for the HTTP server that hosts the registry. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr` | yes | The address for which the server should accept connections. The form depends on the network type (see the `net` option): `HOST:PORT` for `tcp` and `FILE` for a unix socket. |
| `net` | no | The network which is used to create a listening socket. Known networks are `unix` and `tcp`. The default empty value means `tcp`. |
| `prefix` | no | If the server does not run at the root path, use this value to specify the prefix. The root path is the section before `v2`. It should have both preceding and trailing slashes, for example `/path/`. |
| `host` | no | Specifies an externally-reachable address for the registry, as a fully qualified URL. If present, it is used when creating generated URLs. Otherwise, these URLs are derived from client requests. |
| `secret` | yes | A random piece of data used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. This parameter may be omitted, in which case the registry will automatically generate a secret at launch. **WARNING:** If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries. |
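As a sketch of that last requirement, every registry instance behind the load balancer would carry an identical `secret`; the value below is only a placeholder, not a recommendation:

    http:
      addr: localhost:5000
      # must match on every instance behind the load balancer
      secret: asecretforlocaldevelopment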

- - -### tls - -The `tls` struct within `http` is **optional**. Use this to configure TLS -for the server. If you already have a server such as Nginx or Apache running on -the same host as the registry, you may prefer to configure TLS termination there -and proxy connections to the registry server. - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `certificate` | yes | Absolute path to the x509 certificate file. |
| `key` | yes | Absolute path to the x509 private key file. |
| `clientcas` | no | An array of absolute paths to x509 CA files. |
- - -### debug - -The `debug` option is **optional** . Use it to configure a debug server that -can be helpful in diagnosing problems. The debug endpoint can be used for -monitoring registry metrics and health, as well as profiling. Sensitive -information may be available via the debug endpoint. Please be certain that -access to the debug endpoint is locked down in a production environment. - -The `debug` section takes a single, required `addr` parameter. This parameter -specifies the `HOST:PORT` on which the debug server should accept connections. - - -### headers - -The `headers` option is **optional** . Use it to specify headers that the HTTP -server should include in responses. This can be used for security headers such -as `Strict-Transport-Security`. - -The `headers` option should contain an option for each header to include, where -the parameter name is the header's name, and the parameter value a list of the -header's payload values. - -Including `X-Content-Type-Options: [nosniff]` is recommended, so that browsers -will not interpret content as HTML if they are directed to load a page from the -registry. This header is included in the example configuration files. - - -## notifications - - notifications: - endpoints: - - name: alistener - disabled: false - url: https://my.listener.com/event - headers: - timeout: 500 - threshold: 5 - backoff: 1000 - -The notifications option is **optional** and currently may contain a single -option, `endpoints`. - -### endpoints - -Endpoints is a list of named services (URLs) that can accept event notifications. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `name` | yes | A human readable name for the service. |
| `disabled` | no | A boolean to enable/disable notifications for a service. |
| `url` | yes | The URL to which events should be published. |
| `headers` | yes | Static headers to add to each request. Each header's name should be a key underneath `headers`, and each value is a list of payloads for that header name. Note that values must always be lists. |
| `timeout` | yes | An HTTP timeout value. This field takes a positive integer and an optional suffix indicating the unit of time: `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) or `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `threshold` | yes | An integer specifying the number of failures allowed before the endpoint backs off. |
| `backoff` | yes | How long the system backs off before retrying. This field takes a positive integer and an optional suffix indicating the unit of time: `ns`, `us`, `ms`, `s`, `m` or `h`. If you omit the suffix, the system interprets the value as nanoseconds. |
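Because a bare integer is interpreted as nanoseconds, it is safer to state units explicitly when configuring an endpoint. A minimal sketch with explicit durations, reusing the placeholder name and URL from the example above:

    notifications:
      endpoints:
        - name: alistener
          url: https://my.listener.com/event
          timeout: 500ms
          threshold: 5
          backoff: 1s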
- - -## redis - - redis: - addr: localhost:6379 - password: asecret - db: 0 - dialtimeout: 10ms - readtimeout: 10ms - writetimeout: 10ms - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - -Declare parameters for constructing the redis connections. Registry instances -may use the Redis instance for several applications. The current purpose is -caching information about immutable blobs. Most of the options below control -how the registry connects to redis. You can control the pool's behavior -with the [pool](#pool) subsection. - -It's advisable to configure Redis itself with the **allkeys-lru** eviction policy -as the registry does not set an expire value on keys. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr` | yes | Address (host and port) of redis instance. |
| `password` | no | A password used to authenticate to the redis instance. |
| `db` | no | Selects the db for each connection. |
| `dialtimeout` | no | Timeout for connecting to a redis instance. |
| `readtimeout` | no | Timeout for reading from redis connections. |
| `writetimeout` | no | Timeout for writing to redis connections. |
- - -### pool - - pool: - maxidle: 16 - maxactive: 64 - idletimeout: 300s - -Configure the behavior of the Redis connection pool. - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `maxidle` | no | Sets the maximum number of idle connections. |
| `maxactive` | no | Sets the maximum number of connections that should be opened before blocking a connection request. |
| `idletimeout` | no | Sets the amount of time to wait before closing inactive connections. |
- -## health - - health: - storagedriver: - enabled: true - interval: 10s - threshold: 3 - file: - - file: /path/to/checked/file - interval: 10s - http: - - uri: http://server.to.check/must/return/200 - headers: - Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] - statuscode: 200 - timeout: 3s - interval: 10s - threshold: 3 - tcp: - - addr: redis-server.domain.com:6379 - timeout: 3s - interval: 10s - threshold: 3 - -The health option is **optional**. It may contain preferences for a periodic -health check on the storage driver's backend storage, and optional periodic -checks on local files, HTTP URIs, and/or TCP servers. The results of the health -checks are available at /debug/health on the debug HTTP server if the debug -HTTP server is enabled (see http section). - -### storagedriver - -storagedriver contains options for a health check on the configured storage -driver's backend storage. enabled must be set to true for this health check to -be active. - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `enabled` | yes | "true" to enable the storage driver health check or "false" to disable it. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time: `ns`, `us`, `ms`, `s`, `m` or `h`. If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state. |
- -### file - -file is a list of paths to be periodically checked for the existence of a file. -If a file exists at the given path, the health check will fail. This can be -used as a way of bringing a registry out of rotation by creating a file. - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `file` | yes | The path to check for the existence of a file. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time: `ns`, `us`, `ms`, `s`, `m` or `h`. If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
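As a sketch of the out-of-rotation technique described above, the check below marks the instance unhealthy whenever the named file exists; the path is only an illustration, not a convention of the registry:

    health:
      file:
        # touch this file to take the instance out of rotation
        - file: /etc/registry/out-of-rotation
          interval: 10s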
- -### http - -http is a list of HTTP URIs to be periodically checked with HEAD requests. If -a HEAD request doesn't complete or returns an unexpected status code, the -health check will fail. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `uri` | yes | The URI to check. |
| `headers` | no | Static headers to add to each request. Each header's name should be a key underneath `headers`, and each value is a list of payloads for that header name. Note that values must always be lists. |
| `statuscode` | no | Expected status code from the HTTP URI. Defaults to 200. |
| `timeout` | no | The length of time to wait before timing out the HTTP request. This field takes a positive integer and an optional suffix indicating the unit of time: `ns`, `us`, `ms`, `s`, `m` or `h`. If you omit the suffix, the system interprets the value as nanoseconds. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time: `ns`, `us`, `ms`, `s`, `m` or `h`. If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state. |
- -### tcp - -tcp is a list of TCP addresses to be periodically checked with connection -attempts. The addresses must include port numbers. If a connection attempt -fails, the health check will fail. - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr` | yes | The TCP address to connect to, including a port number. |
| `timeout` | no | The length of time to wait before timing out the TCP connection. This field takes a positive integer and an optional suffix indicating the unit of time: `ns`, `us`, `ms`, `s`, `m` or `h`. If you omit the suffix, the system interprets the value as nanoseconds. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time: `ns`, `us`, `ms`, `s`, `m` or `h`. If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state. |
- -## Proxy - - proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] - -Proxy enables a registry to be configured as a pull through cache to the official Docker Hub. See [mirror](mirror.md) for more information. Pushing to a registry configured as a pull through cache is currently unsupported. - - - - - - - - - - - - - - - - - - - - - - -
| Parameter | Required | Description |
|-----------|----------|-------------|
| `remoteurl` | yes | The URL of the official Docker Hub. |
| `username` | no | The username of the Docker Hub account. |
| `password` | no | The password for the official Docker Hub account. |
- -To enable pulling private repositories (e.g. `batman/robin`) a username and password for user `batman` must be specified. Note: These private repositories will be stored in the proxy cache's storage and relevant measures should be taken to protect access to this. - - -## Example: Development configuration - -The following is a simple example you can use for local development: - - version: 0.1 - log: - level: debug - storage: - filesystem: - rootdirectory: /var/lib/registry - http: - addr: localhost:5000 - secret: asecretforlocaldevelopment - debug: - addr: localhost:5001 - -The above configures the registry instance to run on port `5000`, binding to -`localhost`, with the `debug` server enabled. Registry data storage is in the -`/var/lib/registry` directory. Logging is in `debug` mode, which is the most -verbose. - -A similar simple configuration is available at -[config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). -Both are generally useful for local development. - - -## Example: Middleware configuration - -This example illustrates how to configure storage middleware in a registry. -Middleware allows the registry to serve layers via a content delivery network -(CDN). This is useful for reducing requests to the storage layer. - -Currently, the registry supports [Amazon -Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in -conjunction with the S3 storage driver. - - - - - - - - - - - - - - - - - - -
| Parameter | Description |
|-----------|-------------|
| `name` | The storage middleware name. Currently `cloudfront` is an accepted value. |
| `disabled` | Set to `false` to easily disable the middleware. |
| `options` | A set of key/value options to configure the middleware: `baseurl` (the Cloudfront base URL), `privatekey` (the location of your AWS private key on the filesystem), `keypairid` (the ID of your Cloudfront keypair) and `duration` (the duration in minutes for which the URL is valid; default is 20). |
- -The following example illustrates these values: - - middleware: - storage: - - name: cloudfront - disabled: false - options: - baseurl: http://d111111abcdef8.cloudfront.net - privatekey: /path/to/asecret.pem - keypairid: asecret - duration: 60 - - ->**Note**: Cloudfront keys exist separately to other AWS keys. See ->[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) ->for more information. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md deleted file mode 100644 index 77e8f05c0239..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/deploying.md +++ /dev/null @@ -1,231 +0,0 @@ - - -# Deploying a registry server - -You need to [install Docker version 1.6.0 or newer](https://docs.docker.com/installation/). - -## Running on localhost - -Start your registry: - - docker run -d -p 5000:5000 --restart=always --name registry registry:2 - -You can now use it with docker. - -Get any image from the hub and tag it to point to your registry: - - docker pull ubuntu && docker tag ubuntu localhost:5000/ubuntu - -... then push it to your registry: - - docker push localhost:5000/ubuntu - -... then pull it back from your registry: - - docker pull localhost:5000/ubuntu - -To stop your registry, you would: - - docker stop registry && docker rm -v registry - -## Storage - -By default, your registry data is persisted as a [docker volume](https://docs.docker.com/userguide/dockervolumes/) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage. - -Specifically, you might want to point your volume location to a specific place in order to more easily access your registry data. To do so you can: - - docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/data:/var/lib/registry \ - registry:2 - -### Alternatives - -You should usually consider using [another storage backend](https://github.com/docker/distribution/blob/master/docs/storagedrivers.md) instead of the local filesystem. Use the [storage configuration options](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) to configure an alternate storage backend. - -Using one of these will allow you to more easily scale your registry, and leverage your storage redundancy and availability features. - -## Running a domain registry - -While running on `localhost` has its uses, most people want their registry to be more widely available. To do so, the Docker engine requires you to secure it using TLS, which is conceptually very similar to configuring your web server with SSL. - -### Get a certificate - -Assuming that you own the domain `myregistrydomain.com`, and that its DNS record points to the host where you are running your registry, you first need to get a certificate from a CA. - -Create a `certs` directory: - - mkdir -p certs - -Then move and/or rename your crt file to: `certs/domain.crt`, and your key file to: `certs/domain.key`. 
- -Make sure you stopped your registry from the previous steps, then start your registry again with TLS enabled: - - docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/certs:/certs \ - -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ - -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ - registry:2 - -You should now be able to access your registry from another docker host: - - docker pull ubuntu - docker tag ubuntu myregistrydomain.com:5000/ubuntu - docker push myregistrydomain.com:5000/ubuntu - docker pull myregistrydomain.com:5000/ubuntu - -#### Gotcha - -A certificate issuer may supply you with an *intermediate* certificate. In this case, you must combine your certificate with the intermediate's to form a *certificate bundle*. You can do this using the `cat` command: - - cat domain.crt intermediate-certificates.pem > certs/domain.crt - -### Alternatives - -While rarely advisable, you may want to use self-signed certificates instead, or use your registry in an insecure fashion. You will find instructions [here](insecure.md). - -## Load Balancing Considerations - -One may want to use a load balancer to distribute load, terminate TLS or -provide high availability. While a full load balancing setup is outside the -scope of this document, there are a few considerations that can make the process -smoother. - -The most important aspect is that a load balanced cluster of registries must -share the same resources. For the current version of the registry, this means -the following must be the same: - - - Storage Driver - - HTTP Secret - - Redis Cache (if configured) - -If any of these are different, the registry will have trouble serving requests. -As an example, if you're using the filesystem driver, all registry instances -must have access to the same filesystem root, which means they should be in -the same machine. For other drivers, such as s3 or azure, they should be -accessing the same resource, and will likely share an identical configuration. -The _HTTP Secret_ coordinates uploads, so also must be the same across -instances. Configuring different redis instances will work (at the time -of writing), but will not be optimal if the instances are not shared, causing -more requests to be directed to the backend. - -Getting the headers correct is very important. For all responses to any -request under the "/v2/" url space, the `Docker-Distribution-API-Version` -header should be set to the value "registry/2.0", even for a 4xx response. -This header allows the docker engine to quickly resolve authentication realms -and fallback to version 1 registries, if necessary. Confirming this is setup -correctly can help avoid problems with fallback. - -In the same train of thought, you must make sure you are properly sending the -`X-Forwarded-Proto`, `X-Forwarded-For` and `Host` headers to their "client-side" -values. Failure to do so usually makes the registry issue redirects to internal -hostnames or downgrading from https to http. - -A properly secured registry should return 401 when the "/v2/" endpoint is hit -without credentials. The response should include a `WWW-Authenticate` -challenge, providing guidance on how to authenticate, such as with basic auth -or a token service. If the load balancer has health checks, it is recommended -to configure it to consider a 401 response as healthy and any other as down. -This will secure your registry by ensuring that configuration problems with -authentication don't accidentally expose an unprotected registry. 
If you're -using a less sophisticated load balancer, such as Amazon's Elastic Load -Balancer, that doesn't allow one to change the healthy response code, health -checks can be directed at "/", which will always return a `200 OK` response. - -## Restricting access - -Except for registries running on secure local networks, registries should always implement access restrictions. - -### Native basic auth - -The simplest way to achieve access restriction is through basic authentication (this is very similar to other web servers' basic authentication mechanism). - -> **Warning**: You **cannot** use authentication with an insecure registry. You have to [configure TLS first](#running-a-domain-registry) for this to work. - -First create a password file with one entry for the user "testuser", with password "testpassword": - - mkdir auth - docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd - -Make sure you stopped your registry from the previous step, then start it again: - - docker run -d -p 5000:5000 --restart=always --name registry \ - -v `pwd`/auth:/auth \ - -e "REGISTRY_AUTH=htpasswd" \ - -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \ - -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \ - -v `pwd`/certs:/certs \ - -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \ - -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \ - registry:2 - -You should now be able to: - - docker login myregistrydomain.com:5000 - -And then push and pull images as an authenticated user. - -#### Gotcha - -Seeing X509 errors is usually a sign you are trying to use self-signed certificates, and failed to [configure your docker daemon properly](insecure.md). - -### Alternatives - -1. You may want to leverage more advanced basic auth implementations through a proxy design, in front of the registry. You will find examples of such patterns in the [recipes list](recipes.md). - -2. Alternatively, the Registry also supports delegated authentication, redirecting users to a specific, trusted token server. That approach requires significantly more investment, and only makes sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems. - -You will find [background information here](spec/auth/token.md), and [configuration information here](configuration.md#auth). - -Beware that you will have to implement your own authentication service for this to work, or leverage a third-party implementation. - -## Managing with Compose - -As your registry configuration grows more complex, dealing with it can quickly become tedious. - -It's highly recommended to use [Docker Compose](https://docs.docker.com/compose/) to facilitate operating your registry. - -Here is a simple `docker-compose.yml` example that condenses everything explained so far: - -``` -registry: - restart: always - image: registry:2 - ports: - - 5000:5000 - environment: - REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt - REGISTRY_HTTP_TLS_KEY: /certs/domain.key - REGISTRY_AUTH: htpasswd - REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd - REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm - volumes: - - /path/data:/var/lib/registry - - /path/certs:/certs - - /path/auth:/auth -``` - -> **Warning**: replace `/path` by whatever directory that holds your `certs` and `auth` folder from above. 
## Next

You will find more specific and advanced information in the following sections:

 - [Configuration reference](configuration.md)
 - [Working with notifications](notifications.md)
 - [Advanced "recipes"](recipes.md)
 - [Registry API](spec/api.md)
 - [Storage driver model](storagedrivers.md)
 - [Token authentication](spec/auth/token.md)
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md deleted file mode 100644 index 8159b5202563..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/glossary.md +++ /dev/null @@ -1,70 +0,0 @@

# Glossary

This page contains definitions for distribution related terms.
Blob
: A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest"). Layers are a good example of "blobs".

Image
: An image is a named set of immutable data from which a Docker container can be created. An image is represented by a json file called a manifest, and is conceptually a set of layers. Image names indicate the location where they can be pulled from and pushed to, as they usually start with a registry domain name and port.

Layer
: A layer is a tar archive bundling partial content from a filesystem. Layers from an image are usually extracted in order on top of each other to make up a root filesystem from which containers run.

Manifest
: A manifest is the JSON representation of an image.

Namespace
: A namespace is a collection of repositories with a common name prefix. The namespace with an empty prefix is considered the Global Namespace.

Registry
: A registry is a service that lets you store and deliver images.

Repository
: A repository is a set of data containing all versions of a given image.

Scope
: A scope is the portion of a namespace onto which a given authorization token is granted.

Tag
: A tag is conceptually a "version" of a named image. Example: `docker pull myimage:latest` instructs docker to pull the image "myimage" in version "latest".
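To see how these terms relate in practice, here is a single pull command annotated with the vocabulary above (the names are hypothetical):

    # "myregistrydomain.com:5000" is the registry, "staging/webapp" the repository
    # (under the "staging" namespace prefix), and "1.2" the tag; the layers listed
    # in the image's manifest are fetched as blobs.
    docker pull myregistrydomain.com:5000/staging/webapp:1.2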
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md deleted file mode 100644 index c6ac7ad95540..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/help.md +++ /dev/null @@ -1,24 +0,0 @@ - - -# Getting help - -If you need help, or just want to chat, you can reach us: - -- on irc: `#docker-distribution` on freenode -- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at ) - -If you want to report a bug: - -- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md) -- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues) - -You can also find out more about the Docker's project [Getting Help resources](https://docs.docker.com/opensource/get-help/). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy deleted file mode 100644 index 5ecf4c3aed6c..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":737,"height":630,"nodeIndex":171,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":290,"y":83},"max":{"x":736.5,"y":630}},"objects":[{"x":699.0,"y":246.0,"rotation":0.0,"id":166,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-30.0,-12.0],[-30.0,59.5],[33.0,59.5],[33.0,131.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":632.0,"y":243.0,"rotation":0.0,"id":165,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-25.0,-11.0],[-25.0,64.5],[-88.0,64.5],[-88.0,140.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":512.0,"y":203.0,"rotation":0.0,"id":161,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-19.0,-3.0],[79.12746812182615,-3.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":589.9999999999999,"y":167.5,"rotation":0.0,"id":143,"width":101.11111111111111,"height":65.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.rec
tangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":0.722222222222222,"y":0.0,"rotation":0.0,"id":144,"width":99.66666666666663,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Broadcaster

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":290.0,"y":105.0,"rotation":0.0,"id":160,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":26,"lockAspectRatio":false,"lockShape":false,"children":[{"x":12.92581625076238,"y":17.018834253729665,"rotation":0.0,"id":155,"width":189.57418374923762,"height":151.48116574627034,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":25,"lockAspectRatio":false,"lockShape":false,"children":[{"x":97.57418374923762,"y":58.481165746270335,"rotation":90.0,"id":151,"width":149.0,"height":37.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":153,"magnitude":1},{"id":154,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":152,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":151,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":151,"magnitude":1},{"id":154,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":153,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Listener

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":67.5,"y":1.0,"rotation":0.0,"id":154,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":152,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":10.074195639419855,"y":17.481165746270335,"rotation":0.0,"id":150,"width":120.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":20,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":80.5,"rotation":0.0,"id":133,"width":117.0,"height":38.5,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":16,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":135,"magnitude":1},{"id":136,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":134,"width":117.0,"height":30.5,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":133,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":133,"magnitude":1},{"id":136,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":135,"width":117.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

handler

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":51.5,"y":1.0,"rotation":0.0,"id":136,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":134,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":129,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":12,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":131,"magnitude":1},{"id":132,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":130,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":129,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":129,"magnitude":1},{"id":132,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":131,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":132,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":130,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":125,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":127,"magnitude":1},{"id":128,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":125,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":125,"magnitude":1},{"id":128,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

request

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":128,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":126,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.5154455517800614,"y":0.5154455517799761,"rotation":90.39513704250749,"id":145,"width":150.0,"height":150.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":147,"magnitude":1},{"id":148,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":146,"width":150.0,"height":142.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":145,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":145,"magnitude":1},{"id":148,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":147,"width":150.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":68.0,"y":0.9999999999999432,"rotation":0.0,"id":148,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":146,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":156,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":159,"width":206.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry instance

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":473.0,"y":525.0,"rotation":0.0,"id":115,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":69,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":68,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":109,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,4.5],[2.0,11.533649282003012],[2.0,18.567298564006137],[2.0,25.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":665.0,"y":530.0,"rotation":0.0,"id":114,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":68,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":100,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":112,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.0,-0.5],[-2.0,6.533649282003012],[-2.0,13.567298564006137],[-2.0,20.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":598.0,"y":550.0,"rotation":0.0,"id":112,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":113,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":420.0,"y":550.0,"rotation":0.0,"id":109,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":111,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":438.5,"rotation":0.0,"id":104,"width":50.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

. . .

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":410.0,"y":379.5,"rotation":0.0,"id":103,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":84,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":45,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":80,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":41,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":82,"magnitude":1},{"id":83,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":81,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":80,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":80,"magnitude":1},{"id":83,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":82,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":83,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":81,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":76,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":78,"magnitude":1},{"id":79,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":77,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":76,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":76,"magnitude":1},{"id":79,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":78,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":79,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":77,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":72,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":33,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":74,"magnitude":1},{"id":75,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":73,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":72,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":72,"magnitude":1},{"id":75,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":74,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":75,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":73,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":68,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":71,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_1

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":598.0,"y":379.5,"rotation":0.0,"id":102,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":87,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":88,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":90,"magnitude":1},{"id":91,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":89,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":88,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":88,"magnitude":1},{"id":91,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":91,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":89,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":92,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":94,"magnitude":1},{"id":95,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":93,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":92,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":92,"magnitude":1},{"id":95,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":94,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":95,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":93,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":98,"magnitude":1},{"id":99,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":97,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":96,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":96,"magnitude":1},{"id":99,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":98,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_N

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png deleted file mode 100644 index 09de8d2376d6..000000000000 Binary files a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.png and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg b/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg deleted file mode 100644 index 6c3d680b95bf..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/images/notifications.svg +++ /dev/null @@ -1 +0,0 @@ -Registry instanceBroadcaster requestrepositoryhandlerListenerEndpoint_1queueretryhttpEndpoint_Nqueueretryhttp. . .RemoteEndpoint_1RemoteEndpoint_N \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md deleted file mode 100644 index 6cd6769e64b6..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/index.md +++ /dev/null @@ -1,66 +0,0 @@ - - -# Docker Registry - -## What it is - -The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. -The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License). 
## Why use it

You should use the Registry if you want to:

 * tightly control where your images are being stored
 * fully own your image distribution pipeline
 * integrate image storage and distribution tightly into your in-house development workflow

## Alternatives

Users looking for a zero maintenance, ready-to-go solution are encouraged to head over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more).

Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/).

## Requirements

The Registry is compatible with Docker engine **version 1.6.0 or higher**. If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry).

## TL;DR

Start your registry

    docker run -d -p 5000:5000 --name registry registry:2

Pull (or build) some image from the hub

    docker pull ubuntu

Tag the image so that it points to your registry

    docker tag ubuntu localhost:5000/myfirstimage

Push it

    docker push localhost:5000/myfirstimage

Pull it back

    docker pull localhost:5000/myfirstimage

Now stop your registry and remove all data

    docker stop registry && docker rm -v registry

## Next

You should now read the [detailed introduction about the registry](introduction.md), or jump directly to [deployment instructions](deploying.md).
\ No newline at end of file
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md deleted file mode 100644 index 9ccb54196816..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/insecure.md +++ /dev/null @@ -1,90 +0,0 @@

# Insecure Registry

While it's highly recommended to secure your registry using a TLS certificate issued by a known CA, you may alternatively decide to use self-signed certificates, or even use your registry over plain http.

You have to understand the downsides of doing so, and the extra configuration burden it creates.

## Deploying a plain HTTP registry

> **Warning**: it's not possible to use an insecure registry with basic authentication.

This basically tells Docker to entirely disregard security for your registry.

1. edit the file `/etc/default/docker` so that there is a line that reads: `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"` (or add that to an existing `DOCKER_OPTS`)
2. restart your Docker daemon: on Ubuntu, this is usually `service docker stop && service docker start`

**Pros:**

 - relatively easy to configure

**Cons:**

 - this is **very** insecure: you are basically exposing yourself to trivial MITM, and this solution should only be used for isolated testing or in a tightly controlled, air-gapped environment
 - you have to configure every docker daemon that wants to access your registry

## Using self-signed certificates

> **Warning**: using this along with basic authentication requires you to **also** add the certificate to the OS cert store for some versions of docker (see below).

Generate your own certificate:

    mkdir -p certs && openssl req \
      -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \
      -x509 -days 365 -out certs/domain.crt

Be sure to use the name `myregistrydomain.com` as a CN.
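If you want to double-check what was generated — a small sketch, assuming the `openssl` CLI is available on the host — you can print the certificate's subject and validity window:

    # The CN in the subject must match the registry host name you intend to use:
    openssl x509 -in certs/domain.crt -noout -subject -dates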
- -Use the result to [start your registry with TLS enabled](https://github.com/docker/distribution/blob/master/docs/deploying.md#get-a-certificate) - -Then you have to instruct every docker daemon to trust that certificate. This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt`. - -Don't forget to restart docker after doing so. - -**Pros:** - - - more secure than the insecure registry solution - -**Cons:** - - - you have to configure every docker daemon that wants to access your registry - -## Failing... - -Failing to configure docker and trying to pull from a registry that is not using TLS will result in the following message: - -``` -FATA[0000] Error response from daemon: v1 ping attempt failed with error: -Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527. -If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add -`--insecure-registry myregistrydomain.com:5000` to the daemon's arguments. -In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; -simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt -``` - -## Docker still complains about the certificate when using authentication? - -When using authentication, some versions of docker also require you to trust the certificate at the OS level. - -Usually, on Ubuntu this is done with: - - cp auth/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt - update-ca-certificates - -... and on Red Hat (and its derivatives) with: - - cp auth/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt - update-ca-trust - -... On some distributions, e.g. Oracle Linux 6, the Shared System Certificates feature needs to be manually enabled: - - update-ca-trust enable - -Now restart docker (`service docker stop && service docker start`, or any other way you use to restart docker). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md deleted file mode 100644 index aefefc34701d..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/introduction.md +++ /dev/null @@ -1,55 +0,0 @@ - - -# Understanding the Registry - -A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. - - > Example: the image `distribution/registry`, with tags `2.0` and `2.1`. - -Users interact with a registry by using docker push and pull commands. - - > Example: `docker pull registry-1.docker.io/distribution/registry:2.1`. - -Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, Ceph Rados, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storagedrivers.md). - -Since securing access to your hosted images is paramount, the Registry natively supports TLS and basic authentication. - -The Registry GitHub repository includes additional information about advanced authentication and authorization methods. Only very large or public deployments are expected to extend the Registry in this way. 
Finally, the Registry ships with a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting, mostly useful for large installations that want to collect metrics.

## Understanding image naming

Image names as used in typical docker commands reflect their origin:

 * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull docker.io/library/ubuntu` command
 * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar`

You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](https://docs.docker.com/reference/commandline/cli/).

## Use cases

Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available.

It's also an essential component if you want to quickly deploy a new image over a large cluster of machines.

Finally, it's the best way to distribute images inside an isolated network.

## Requirements

You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the cli, and at least grasp basic concepts about networking.

Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. A strong understanding of http and overall network communications, plus familiarity with golang, are certainly useful as well for advanced operations or hacking.

## Next

Dive into [deploying your registry](deploying.md)
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md deleted file mode 100644 index da0aba91a9db..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/migration.md +++ /dev/null @@ -1,30 +0,0 @@

# Migrating a 1.0 registry to 2.0

TODO: This needs to be revised in light of Olivier's work

A few thoughts here:

There was no "1.0". There was an implementation of the Registry API V1 but only a version 0.9 of the service was released.
The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool.
One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry.

-----

The Docker Registry 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process:

1. Configure and test a 2.0 registry image in a sandbox environment.

2. Back up your production image storage.

   Your production image storage should reside on a volume or storage backend.
- Make sure you have a backup of its contents. - -3. Stop your existing registry service. - -4. Restart your registry with your tested 2.0 image. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md deleted file mode 100644 index feb2630c0e4e..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/mirror.md +++ /dev/null @@ -1,72 +0,0 @@ - - -# Registry as a pull through cache - -## Use-case - -If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn’t have it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the redundant image fetch traffic on your local network. - -### Alternatives - -Alternatively, if the set of images you are using is well delimited, you can simply pull them manually and push them to a simple, local, private registry. - -Furthermore, if your images are all built in-house, not using the Hub at all and relying entirely on your local registry is the simplest scenario. - -### Gotcha - -It's currently not possible to mirror another private registry. Only the central Hub can be mirrored. - -### Solution - -The Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally. - -## How does it work? - -The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage. - -### What if the content changes on the Hub? - -When a pull is attempted with a tag, the Registry will check the remote to ensure if it has the latest version of the requested content. If it doesn't it will fetch the latest content and cache it. - -### What about my disk? - -In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching. - -To ensure best performance and guarantee correctness the Registry cache should be configured to use the `filesystem` driver for storage. - -## Running a Registry as a pull through cache - -The easiest way to run a registry as a pull through cache is to run the official Registry image. - -Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster. - -### Configuring the cache - -To configure a Registry to run as a pull through cache, the addition of a `proxy` section is required to the config file. - -In order to access private images on the Docker Hub, a username and password can be supplied. - - proxy: - remoteurl: https://registry-1.docker.io - username: [username] - password: [password] - -> :warn: if you specify a username and password, it's very important to understand that private resources that this user has access to on the Hub will be made available on your mirror. 
It's thus paramount that you secure your mirror by implementing authentication if you expect these resources to stay private! - -### Configuring the Docker daemon - -You will need to pass the `--registry-mirror` option to your Docker daemon on startup: - - docker --registry-mirror=https:// daemon - -For example, if your mirror is serving on http://10.0.0.2:5000, you would run: - - docker --registry-mirror=https://10.0.0.2:5000 daemon - -NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md deleted file mode 100644 index 1e43d0e3d3de..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/nginx.md +++ /dev/null @@ -1,166 +0,0 @@ - - -# Authenticating proxy with nginx - - -## Use-case - -People already relying on a nginx proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline. - -Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal. - -### Alternatives - -If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth). - -### Solution - -With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry. - -While we use a simple htpasswd file as an example, any other nginx authentication backend should be fairly easy to implement once you are done with the example. - -We also implement push restriction (to a limited user group) for the sake of the example. Again, you should modify this to fit your mileage. - -### Gotchas - -While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. - -Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. Make sure the extra complexity is required. - -For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets the following client header: - -``` -X-Real-IP -X-Forwarded-For -X-Forwarded-Proto -``` - -So if you have an nginx sitting behind it, should remove these lines from the example config below: - -``` -X-Real-IP $remote_addr; # pass on real client's IP -X-Forwarded-For $proxy_add_x_forwarded_for; -X-Forwarded-Proto $scheme; -``` - -Otherwise nginx will reset the ELB's values, and the requests will not be routed properly. For more information, see [#970](https://github.com/docker/distribution/issues/970). - -## Setting things up - -Read again [the requirements](recipes.md#requirements). - -Ready? - -Run the following: - -``` -mkdir -p auth -mkdir -p data - -# This is the main nginx configuration you will use -cat < auth/nginx.conf -upstream docker-registry { - server registry:5000; -} - -## Set a variable to help us decide if we need to add the -## 'Docker-Distribution-Api-Version' header. -## The registry always sets this header. 
-## In the case of nginx performing auth, the header will be unset -## since nginx is auth-ing before proxying. -map \$upstream_http_docker_distribution_api_version \$docker_distribution_api_version { - 'registry/2.0' ''; - default registry/2.0; -} - -server { - listen 443 ssl; - server_name myregistrydomain.com; - - # SSL - ssl_certificate /etc/nginx/conf.d/domain.crt; - ssl_certificate_key /etc/nginx/conf.d/domain.key; - - # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html - ssl_protocols TLSv1.1 TLSv1.2; - ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; - ssl_prefer_server_ciphers on; - ssl_session_cache shared:SSL:10m; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location /v2/ { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) { - return 404; - } - - # To add basic authentication to v2 use auth_basic setting. - auth_basic "Registry realm"; - auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd; - - ## If $docker_distribution_api_version is empty, the header will not be added. - ## See the map directive above where this variable is defined. - add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always; - - proxy_pass http://docker-registry; - proxy_set_header Host \$http_host; # required for docker client's sake - proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP - proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto \$scheme; - proxy_read_timeout 900; - } -} -EOF - -# Now, create a password file for "testuser" and "testpassword" -docker run --entrypoint htpasswd httpd:2.4 -bn testuser testpassword > auth/nginx.htpasswd - -# Copy over your certificate files -cp domain.crt auth -cp domain.key auth - -# Now create your compose file - -cat < docker-compose.yml -nginx: - image: "nginx:1.9" - ports: - - 5043:443 - links: - - registry:registry - volumes: - - `pwd`/auth/:/etc/nginx/conf.d - -registry: - image: registry:2 - ports: - - 127.0.0.1:5000:5000 - volumes: - - `pwd`/data:/var/lib/registry -EOF -``` - -## Starting and stopping - -Now, start your stack: - - docker-compose up -d - -Login with a "push" authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image: - - docker login myregistrydomain.com:5043 - docker tag ubuntu myregistrydomain.com:5043/test - docker push myregistrydomain.com:5043/test - docker pull myregistrydomain.com:5043/test diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md deleted file mode 100644 index 9e2168062c4b..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/notifications.md +++ /dev/null @@ -1,320 +0,0 @@ - - -# Notifications - -The Registry supports sending webhook notifications in response to events -happening within the registry. Notifications are sent in response to manifest -pushes and pulls and layer pushes and pulls. These actions are serialized into -events. The events are queued into a registry-internal broadcast system which -queues and dispatches events to [_Endpoints_](#endpoints). 
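Before getting into the details, it may help to see what the receiving side of a notification could look like. The following is a minimal sketch of a hypothetical listener written in Go; the port, path, and the subset of event fields it decodes are illustrative and only mirror the envelope format described later in this document.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// envelope mirrors the notification envelope described later in this
// document: a single "events" array whose entries carry an action and a
// target. Only a few of the documented fields are picked out here.
type envelope struct {
	Events []struct {
		ID     string `json:"id"`
		Action string `json:"action"`
		Target struct {
			Repository string `json:"repository"`
			Digest     string `json:"digest"`
			MediaType  string `json:"mediaType"`
		} `json:"target"`
	} `json:"events"`
}

func main() {
	http.HandleFunc("/event", func(w http.ResponseWriter, r *http.Request) {
		var env envelope
		if err := json.NewDecoder(r.Body).Decode(&env); err != nil {
			// Be lenient: a picky endpoint can cause the registry's queue to back up.
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		for _, e := range env.Events {
			log.Printf("event %s: %s %s@%s", e.ID, e.Action, e.Target.Repository, e.Target.Digest)
		}
		// Any 2xx (or 3xx) response tells the registry the message was delivered.
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":5003", nil))
}
```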
- -![](images/notifications.png) - -## Endpoints - -Notifications are sent to _endpoints_ via HTTP requests. Each configured -endpoint has isolated queues, retry configuration and http targets within each -instance of a registry. When an action happens within the registry, it is -converted into an event which is dropped into an inmemory queue. When the -event reaches the end of the queue, an http request is made to the endpoint -until the request succeeds. The events are sent serially to each endpoint but -order is not guaranteed. - -## Configuration - -To setup a registry instance to send notifications to endpoints, one must add -them to the configuration. A simple example follows: - - notifications: - endpoints: - - name: alistener - url: https://mylistener.example.com/event - headers: - Authorization: [Bearer ] - timeout: 500ms - threshold: 5 - backoff: 1s - -The above would configure the registry with an endpoint to send events to -`https://mylistener.example.com/event`, with the header "Authorization: Bearer -". The request would timeout after 500 milliseconds. If -5 failures happen consecutively, the registry will backoff for 1 second before -trying again. - -For details on the fields, please see the [configuration documentation](configuration.md#notifications). - -A properly configured endpoint should lead to a log message from the registry -upon startup: - -``` -INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer ]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry -``` - -## Events - -Events have a well-defined JSON structure and are sent as the body of -notification requests. One or more events are sent in a structure called an -envelope. Each event has a unique id that can be used to uniquely identify incoming -requests, if required. Along with that, an _action_ is provided with a -_target, identifying the object mutated during the event. - -The fields available in an event are described in detail in the -[godoc](http://godoc.org/github.com/docker/distribution/notifications#Event). - -**TODO:** Let's break out the fields here rather than rely on the godoc. - -The following is an example of a JSON event, sent in response to the push of a -manifest: - -```json -{ - "id": "asdf-asdf-asdf-asdf-0", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "size": 1, - "digest": "sha256:0123456789abcdef0", - "length": 1, - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } -} -``` - -> __NOTE:__ As of version 2.1, the `length` field for event targets -> is being deprecated for the `size` field, bringing the target in line with -> common nomenclature. Both will continue to be set for the foreseeable -> future. Newer code should favor `size` but accept either. - -## Envelope - -The envelope contains one or more events, with the following json structure: - -```json -{ - "events": [ ... ], -} -``` - -While events may be sent in the same envelope, the set of events within that -envelope have no implied relationship. 
For example, the registry may choose to -group unrelated events and send them in the same envelope to reduce the total -number of requests. - -The full package has the mediatype -"application/vnd.docker.distribution.events.v1+json", which will be set on the -request coming to an endpoint. - -An example of a full event may look as follows: - -```json -GET /callback -Host: application/vnd.docker.distribution.events.v1+json -Authorization: Bearer -Content-Type: application/vnd.docker.distribution.events.v1+json - -{ - "events": [ - { - "id": "asdf-asdf-asdf-asdf-0", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", - "length": 1, - "digest": "sha256:0123456789abcdef0", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-1", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "length": 2, - "digest": "tarsum.v2+sha256:0123456789abcdef1", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - }, - { - "id": "asdf-asdf-asdf-asdf-2", - "timestamp": "2006-01-02T15:04:05Z", - "action": "push", - "target": { - "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", - "length": 3, - "digest": "tarsum.v2+sha256:0123456789abcdef2", - "repository": "library/test", - "url": "http://example.com/v2/library/test/manifests/latest" - }, - "request": { - "id": "asdfasdf", - "addr": "client.local", - "host": "registrycluster.local", - "method": "PUT", - "useragent": "test/0.1" - }, - "actor": { - "name": "test-actor" - }, - "source": { - "addr": "hostname.local:port" - } - } - ] -} -``` - -## Responses - -The registry is fairly accepting of the response codes from endpoints. If an -endpoint responds with any 2xx or 3xx response code (after following -redirects), the message will be considered delivered and discarded. - -In turn, it is recommended that endpoints are accepting of incoming responses, -as well. While the format of event envelopes are standardized by media type, -any "pickyness" about validation may cause the queue to backup on the -registry. - -## Monitoring - -The state of the endpoints are reported via the debug/vars http interface, -usually configured to `http://localhost:5001/debug/vars`. Information such as -configuration and metrics are available by endpoint. 
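If monitoring is automated rather than done by reading the JSON by hand, a small poller can watch the pending queue sizes. The sketch below assumes the debug address used in these docs and the field names from the sample output shown next; everything else is illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// localhost:5001 matches the debug address used in these docs.
	resp, err := http.Get("http://localhost:5001/debug/vars")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Decode only the notifications section; other expvar keys are ignored.
	var vars struct {
		Notifications struct {
			Endpoints []struct {
				Name    string `json:"name"`
				Metrics struct {
					Pending  int `json:"Pending"`
					Failures int `json:"Failures"`
					Errors   int `json:"Errors"`
				} `json:"Metrics"`
			} `json:"endpoints"`
		} `json:"notifications"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		log.Fatal(err)
	}
	for _, ep := range vars.Notifications.Endpoints {
		fmt.Printf("%s: pending=%d errors=%d\n", ep.Name, ep.Metrics.Pending, ep.Metrics.Errors)
	}
}
```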
- -The following provides an example of a few endpoints that have experienced -several failures and have since recovered: - -```json -"notifications":{ - "endpoints":[ - { - "name":"local-5003", - "url":"http://localhost:5003/callback", - "Headers":{ - "Authorization":[ - "Bearer \u003can example token\u003e" - ] - }, - "Timeout":1000000000, - "Threshold":10, - "Backoff":1000000000, - "Metrics":{ - "Pending":76, - "Events":76, - "Successes":0, - "Failures":0, - "Errors":46, - "Statuses":{ - - } - } - }, - { - "name":"local-8083", - "url":"http://localhost:8083/callback", - "Headers":null, - "Timeout":1000000000, - "Threshold":10, - "Backoff":1000000000, - "Metrics":{ - "Pending":0, - "Events":76, - "Successes":76, - "Failures":0, - "Errors":28, - "Statuses":{ - "202 Accepted":76 - } - } - } - ] -} -``` - -If using notification as part of a larger application, it is _critical_ to -monitor the size ("Pending" above) of the endpoint queues. If failures or -queue sizes are increasing, it can indicate a larger problem. - -The logs are also a valuable resource for monitoring problems. A failing -endpoint will lead to messages similar to the following: - -``` -ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying -WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off -``` - -The above indicates that several errors have led to a backoff and the registry -will wait before retrying. - -## Considerations - -Currently, the queues are inmemory, so endpoints should be _reasonably -reliable_. They are designed to make a best-effort to send the messages but if -an instance is lost, messages may be dropped. If an endpoint goes down, care -should be taken to ensure that the registry instance is not terminated before -the endpoint comes back up or messages will be lost. - -This can be mitigated by running endpoints in close proximity to the registry -instances. One could run an endpoint that pages to disk and then forwards a -request to provide better durability. - -The notification system is designed around a series of interchangeable _sinks_ -which can be wired up to achieve interesting behavior. If this system doesn't -provide acceptable guarantees, adding a transactional `Sink` to the registry -is a possibility, although it may have an effect on request service time. -Please see the -[godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink) -for more information. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md deleted file mode 100644 index 833db4117312..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx-setup-guide.md +++ /dev/null @@ -1,79 +0,0 @@ - - -# OS X Setup Guide - -## Use-case - -This is useful if you intend to run a registry server natively on OS X. - -### Alternatives - -You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM. - -The simplest road to get there is traditionally to use the [docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](https://docs.docker.com/machine/), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM. 
- -### Solution - -Using the method described here, you install and compile your own from the git repository and run it as an OS X agent. - -### Gotchas - -Production services operation on OS X is out of scope of this document. Be sure you understand well these aspects before considering going to production with this. - -## Setup golang on your machine - -If you know, safely skip to the next section. - -If you don't, the TLDR is: - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) - source ~/.gvm/scripts/gvm - gvm install go1.4.2 - gvm use go1.4.2 - -If you want to understand, you should read [How to Write Go Code](https://golang.org/doc/code.html). - -## Checkout the Docker Distribution source tree - - mkdir -p $GOPATH/src/github.com/docker - git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution - cd $GOPATH/src/github.com/docker/distribution - -## Build the binary - - GOPATH=$(PWD)/Godeps/_workspace:$GOPATH make binaries - sudo cp bin/registry /usr/local/libexec/registry - -## Setup - -Copy the registry configuration file in place: - - mkdir /Users/Shared/Registry - cp docs/osx/config.yml /Users/Shared/Registry/config.yml - -## Running the Docker Registry under launchd - -Copy the Docker registry plist into place: - - plutil -lint docs/osx/com.docker.registry.plist - cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/ - chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist - -Start the Docker registry: - - launchctl load ~/Library/LaunchAgents/com.docker.registry.plist - -### Restarting the docker registry service - - launchctl stop com.docker.registry - launchctl start com.docker.registry - -### Unloading the docker registry service - - launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist deleted file mode 100644 index 0982349f4d5b..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/com.docker.registry.plist +++ /dev/null @@ -1,42 +0,0 @@ - - - - - Label - com.docker.registry - KeepAlive - - StandardErrorPath - /Users/Shared/Registry/registry.log - StandardOutPath - /Users/Shared/Registry/registry.log - Program - /usr/local/libexec/registry - ProgramArguments - - /usr/local/libexec/registry - /Users/Shared/Registry/config.yml - - Sockets - - http-listen-address - - SockServiceName - 5000 - SockType - dgram - SockFamily - IPv4 - - http-debug-address - - SockServiceName - 5001 - SockType - dgram - SockFamily - IPv4 - - - - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml b/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml deleted file mode 100644 index 63b8f713531b..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/osx/config.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: 0.1 -log: - level: info - fields: - service: registry - environment: macbook-air -storage: - cache: - blobdescriptor: inmemory - filesystem: - rootdirectory: /Users/Shared/Registry -http: - addr: 0.0.0.0:5000 - secret: mytokensecret - debug: - addr: localhost:5001 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md deleted file mode 100644 index 1dab46a03932..000000000000 --- 
a/Godeps/_workspace/src/github.com/docker/distribution/docs/recipes.md +++ /dev/null @@ -1,35 +0,0 @@ - - -# Recipes - -You will find here a list of "recipes", end-to-end scenarios for exotic or otherwise advanced use-cases. - -Most users are not expected to have a use for these. - -## Requirements - -You should have followed entirely the basic [deployment guide](deploying.md). - -If you have not, please take the time to do so. - -At this point, it's assumed that: - - * you understand Docker security requirements, and how to configure your docker engines properly - * you have installed Docker Compose - * it's HIGHLY recommended that you get a certificate from a known CA instead of self-signed certificates - * inside the current directory, you have a X509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com` - * be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm -v registry`) - -## The List - - * [using Apache as an authenticating proxy](apache.md) - * [using Nginx as an authenticating proxy](nginx.md) - * [running a Registry on OS X](osx-setup-guide.md) - * [hacking the registry: build instructions](building.md) - * [mirror the Docker Hub](mirror.md) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md deleted file mode 100644 index 51db6046e9ba..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md +++ /dev/null @@ -1,4570 +0,0 @@ - - -# Docker Registry HTTP API V2 - -## Introduction - -The _Docker Registry HTTP API_ is the protocol to facilitate distribution of -images to the docker engine. It interacts with instances of the docker -registry, which is a service to manage information about docker images and -enable their distribution. The specification covers the operation of version 2 -of this API, known as _Docker Registry HTTP API V2_. - -While the V1 registry protocol is usable, there are several problems with the -architecture that have led to this new version. The main driver of this -specification these changes to the docker the image format, covered in -[docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image -definition and improves security. This specification will build on that work, -leveraging new properties of the manifest format to improve performance, -reduce bandwidth usage and decrease the likelihood of backend corruption. - -For relevant details and history leading up to this specification, please see -the following issues: - -- [docker/docker#8093](https://github.com/docker/docker/issues/8093) -- [docker/docker#9015](https://github.com/docker/docker/issues/9015) -- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) - -### Scope - -This specification covers the URL layout and protocols of the interaction -between docker registry and docker core. This will affect the docker core -registry API and the rewrite of docker-registry. Docker registry -implementations may implement other API endpoints, but they are not covered by -this specification. 
- -This includes the following features: - -- Namespace-oriented URI Layout -- PUSH/PULL registry server for V2 image manifest format -- Resumable layer PUSH support -- V2 Client library implementation - -While authentication and authorization support will influence this -specification, details of the protocol will be left to a future specification. -Relevant header definitions and error codes are present to provide an -indication of what a client may encounter. - -#### Future - -There are features that have been discussed during the process of cutting this -specification. The following is an incomplete list: - -- Immutable image references -- Multiple architecture support -- Migration from v2compatibility representation - -These may represent features that are either out of the scope of this -specification, the purview of another specification or have been deferred to a -future version. - -### Use Cases - -For the most part, the use cases of the former registry API apply to the new -version. Differentiating use cases are covered below. - -#### Image Verification - -A docker engine instance would like to run verified image named -"library/ubuntu", with the tag "latest". The engine contacts the registry, -requesting the manifest for "library/ubuntu:latest". An untrusted registry -returns a manifest. Before proceeding to download the individual layers, the -engine verifies the manifest's signature, ensuring that the content was -produced from a trusted source and no tampering has occured. After each layer -is downloaded, the engine verifies the digest of the layer, ensuring that the -content matches that specified by the manifest. - -#### Resumable Push - -Company X's build servers lose connectivity to docker registry before -completing an image layer transfer. After connectivity returns, the build -server attempts to re-upload the image. The registry notifies the build server -that the upload has already been partially attempted. The build server -responds by only sending the remaining data to complete the image file. - -#### Resumable Pull - -Company X is having more connectivity problems but this time in their -deployment datacenter. When downloading an image, the connection is -interrupted before completion. The client keeps the partial data and uses http -`Range` requests to avoid downloading repeated data. - -#### Layer Upload De-duplication - -Company Y's build system creates two identical docker layers from build -processes A and B. Build process A completes uploading the layer before B. -When process B attempts to upload the layer, the registry indicates that its -not necessary because the layer is already known. - -If process A and B upload the same layer at the same time, both operations -will proceed and the first to complete will be stored in the registry (Note: -we may modify this to prevent dogpile with some locking mechanism). - -### Changes - -The V2 specification has been written to work as a living document, specifying -only what is certain and leaving what is not specified open or to future -changes. Only non-conflicting additions should be made to the API and accepted -changes should avoid preventing future changes from happening. - -This section should be updated when changes are made to the specification, -indicating what is different. Optionally, we may start marking parts of the -specification to correspond with the versions enumerated here. 
- -Each set of changes is given a letter corresponding to a set of modifications -that were applied to the baseline specification. These are merely for -reference and shouldn't be used outside the specification other than to -identify a set of modifications. - -
-
-**f**
-
-  - Specify the delete API for layers and manifests.
-
-**e**
-
-  - Added support for listing registry contents.
-  - Added pagination to tags API.
-  - Added common approach to support pagination.
-
-**d**
-
-  - Allow repository name components to be one character.
-  - Clarified that single component names are allowed.
-
-**c**
-
-  - Added section covering digest format.
-  - Added more clarification that manifest cannot be deleted by tag.
-
-**b**
-
-  - Added capability of doing streaming upload to PATCH blob upload.
-  - Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
-  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
-
-**a**
-
-  - Added support for immutable manifest references in manifest endpoints.
-  - Deleting a manifest by tag has been deprecated.
-  - Specified `Docker-Content-Digest` header for appropriate entities.
-  - Added error code for unsupported operations.
-
- -## Overview - -This section covers client flows and details of the API endpoints. The URI -layout of the new API is structured to support a rich authentication and -authorization model by leveraging namespaces. All endpoints will be prefixed -by the API version and the repository name: - - /v2// - -For example, an API endpoint that will work with the `library/ubuntu` -repository, the URI prefix will be: - - /v2/library/ubuntu/ - -This scheme provides rich access control over various operations and methods -using the URI prefix and http methods that can be controlled in variety of -ways. - -Classically, repository names have always been two path components where each -path component is less than 30 characters. The V2 registry API does not -enforce this. The rules for a repository name are as follows: - -1. A repository name is broken up into _path components_. A component of a - repository name must be at least one lowercase, alpha-numeric characters, - optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. -2. If a repository name has two or more path components, they must be - separated by a forward slash ("/"). -3. The total length of a repository name, including slashes, must be less the - 256 characters. - -These name requirements _only_ apply to the registry API and should accept a -superset of what is supported by other docker ecosystem components. - -All endpoints should support aggressive http caching, compression and range -headers, where appropriate. The new API attempts to leverage HTTP semantics -where possible but may break from standards to implement targeted features. - -For detail on individual endpoints, please see the [_Detail_](#detail) -section. - -### Errors - -Actionable failure conditions, covered in detail in their relevant sections, -are reported as part of 4xx responses, in a json response body. One or more -errors will be returned in the following format: - - { - "errors:" [{ - "code": , - "message": , - "detail": - }, - ... - ] - } - -The `code` field will be a unique identifier, all caps with underscores by -convention. The `message` field will be a human readable string. The optional -`detail` field may contain arbitrary json data providing information the -client can use to resolve the issue. - -While the client can take action on certain error codes, the registry may add -new error codes over time. All client implementations should treat unknown -error codes as `UNKNOWN`, allowing future error codes to be added without -breaking API compatibility. For the purposes of the specification error codes -will only be added and never removed. - -For a complete account of all error codes, please see the _Detail_ section. - -### API Version Check - -A minimal endpoint, mounted at `/v2/` will provide version support information -based on its response statuses. The request format is as follows: - - GET /v2/ - -If a `200 OK` response is returned, the registry implements the V2(.1) -registry API and the client may proceed safely with other V2 operations. -Optionally, the response may contain information about the supported paths in -the response body. The client should be prepared to ignore this data. - -If a `401 Unauthorized` response is returned, the client should take action -based on the contents of the "WWW-Authenticate" header and try the endpoint -again. 
Depending on access control setup, the client may still have to -authenticate against different resources, even if this check succeeds. - -If `404 Not Found` response status, or other unexpected status, is returned, -the client should proceed with the assumption that the registry does not -implement V2 of the API. - -When a `200 OK` or `401 Unauthorized` response is returned, the -"Docker-Distribution-API-Version" header should be set to "registry/2.0". -Clients may require this header value to determine if the endpoint serves this -API. When this header is omitted, clients may fallback to an older API version. - -### Content Digests - -This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). -The core of this design is the concept of a content addressable identifier. It -uniquely identifies content by taking a collision-resistant hash of the bytes. -Such an identifier can be independently calculated and verified by selection -of a common _algorithm_. If such an identifier can be communicated in a secure -manner, one can retrieve the content from an insecure source, calculate it -independently and be certain that the correct content was obtained. Put simply, -the identifier is a property of the content. - -To disambiguate from other concepts, we call this identifier a _digest_. A -_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ -portion. The _algorithm_ identifies the methodology used to calculate the -digest. The _hex_ portion is the hex-encoded result of the hash. - -We define a _digest_ string to match the following grammar: -``` -digest := algorithm ":" hex -algorithm := /[A-Fa-f0-9_+.-]+/ -hex := /[A-Fa-f0-9]+/ -``` - -Some examples of _digests_ include the following: - -digest | description | -----------------------------------------------------------------------------------|------------------------------------------------ -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | -tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Tarsum digest, used for legacy layer digests. | - -> __NOTE:__ While we show an example of using a `tarsum` digest, the security -> of tarsum has not been verified. It is recommended that most implementations -> use sha256 for interoperability. - -While the _algorithm_ does allow one to implement a wide variety of -algorithms, compliant implementations should use sha256. Heavy processing of -input before calculating a hash is discouraged to avoid degrading the -uniqueness of the _digest_ but some canonicalization may be performed to -ensure consistent identifiers. - -Let's use a simple example in pseudo-code to demonstrate a digest calculation: -``` -let C = 'a small string' -let B = sha256(C) -let D = 'sha256:' + EncodeHex(B) -let ID(C) = D -``` - -Above, we have bytestring `C` passed into a function, `SHA256`, that returns a -bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated -with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` -as equal to `D`. A digest can be verified by independently calculating `D` and -comparing it with identifier `ID(C)`. - -#### Digest Header - -To provide verification of http content, any response may include a `Docker- -Content-Digest` header. This will include the digest of the target entity -returned in the response. For blobs, this is the entire blob content. 
For -manifests, this is the manifest body without the signature content, also known -as the JWS payload. Note that the commonly used canonicalization for digest -calculation may be dependent on the mediatype of the content, such as with -manifests. - -The client may choose to ignore the header or may verify it to ensure content -integrity and transport security. This is most important when fetching by a -digest. To ensure security, the content should be verified against the digest -used to fetch the content. At times, the returned digest may differ from that -used to initiate a request. Such digests are considered to be from different -_domains_, meaning they have different values for _algorithm_. In such a case, -the client may choose to verify the digests in both domains or ignore the -server's digest. To maintain security, the client _must_ always verify the -content against the _digest_ used to fetch the content. - -> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use -> the same digest used to fetch the content to verify it. The header `Docker- -> Content-Digest` should not be trusted over the "local" digest. - -### Pulling An Image - -An "image" is a combination of a JSON manifest and individual layer files. The -process of pulling an image centers around retrieving these two components. - -The first step in pulling an image is to retrieve the manifest. For reference, -the relevant manifest fields for the registry are the following: - - field | description | -----------|------------------------------------------------| -name | The name of the image. | -tag | The tag for this version of the image. | -fsLayers | A list of layer descriptors (including tarsum) | -signature | A JWS used to verify the manifest content | - -For more information about the manifest format, please see -[docker/docker#8093](https://github.com/docker/docker/issues/8093). - -When the manifest is in hand, the client must verify the signature to ensure -the names and layers are valid. Once confirmed, the client will then use the -tarsums to download the individual layers. Layers are stored in as blobs in -the V2 registry API, keyed by their tarsum digest. - -#### Pulling an Image Manifest - -The image manifest can be fetched with the following url: - -``` -GET /v2//manifests/ -``` - -The `name` and `reference` parameter identify the image and are required. The -reference may include a tag or digest. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful, the image -manifest will be returned, with the following format (see docker/docker#8093 -for details): - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": - } - -The client should verify the returned manifest signature for authenticity -before fetching layers. - -#### Pulling a Layer - -Layers are stored in the blob portion of the registry, keyed by tarsum digest. -Pulling a layer is carried out by a standard http request. The URL is as -follows: - - GET /v2//blobs/ - -Access to a layer will be gated by the `name` of the repository but is -identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an -opaque field, to be interpreted by the tarsum library. - -This endpoint may issue a 307 (302 for /blobs/uploads/ -``` - -The parameters of this request are the image namespace under which the layer -will be linked. Responses to this request are covered below. 
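As a concrete illustration of starting an upload, a client-side sketch in Go might look like the following. The function name and error handling are made up for the example; the headers it reads are the ones described in the responses covered below.

```go
package main

import (
	"fmt"
	"net/http"
)

// initiateUpload POSTs to the uploads endpoint for a repository and returns
// the opaque upload URL and upload id from the response headers.
func initiateUpload(registry, name string) (uploadURL, uploadUUID string, err error) {
	resp, err := http.Post(registry+"/v2/"+name+"/blobs/uploads/", "", nil)
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return "", "", fmt.Errorf("unexpected status: %s", resp.Status)
	}
	// Treat the Location value as opaque; clients should never try to
	// assemble it themselves.
	return resp.Header.Get("Location"), resp.Header.Get("Docker-Upload-UUID"), nil
}
```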
- -##### Existing Layers - -The existence of a layer can be checked via a `HEAD` request to the blob store -API. The request should be formatted as follows: - -``` -HEAD /v2//blobs/ -``` - -If the layer with the tarsum specified in `digest` is available, a 200 OK -response will be received, with no actual body content (this is according to -http specification). The response will look as follows: - -``` -200 OK -Content-Length: -Docker-Content-Digest: -``` - -When this response is received, the client can assume that the layer is -already available in the registry under the given name and should take no -further action to upload the layer. Note that the binary digests may differ -for the existing registry layer, but the tarsums will be guaranteed to match. - -##### Uploading the Layer - -If the POST request is successful, a `202 Accepted` response will be returned -with the upload URL in the `Location` header: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The rest of the upload process can be carried out with the returned url, -called the "Upload URL" from the `Location` header. All responses to the -upload url, whether sending data or getting status, will be in this format. -Though the URI format (`/v2//blobs/uploads/`) for the `Location` -header is specified, clients should treat it as an opaque url and should never -try to assemble the it. While the `uuid` parameter may be an actual UUID, this -proposal imposes no constraints on the format and clients should never impose -any. - -If clients need to correlate local upload state with remote upload state, the -contents of the `Docker-Upload-UUID` header should be used. Such an id can be -used to key the last used location header when implementing resumable uploads. - -##### Upload Progress - -The progress and chunk coordination of the upload process will be coordinated -through the `Range` header. While this is a non-standard use of the `Range` -header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. -For an upload that just started, for an example with a 1000 byte layer file, -the `Range` header would be as follows: - -``` -Range: bytes=0-0 -``` - -To get the status of an upload, issue a GET request to the upload URL: - -``` -GET /v2//blobs/uploads/ -Host: -``` - -The response will be similar to the above, except will return 204 status: - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: bytes=0- -Docker-Upload-UUID: -``` - -Note that the HTTP `Range` header byte ranges are inclusive and that will be -honored, even in non-standard use cases. - -##### Monolithic Upload - -A monolithic upload is simply a chunked upload with a single chunk and may be -favored by clients that would like to avoided the complexity of chunking. To -carry out a "monolithic" upload, one can simply put the entire content blob to -the provided URL: - -``` -PUT /v2//blobs/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Type: application/octet-stream - - -``` - -The "digest" parameter must be included with the PUT request. Please see the -_Completed Upload_ section for details on the parameters and expected -responses. 
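To make the monolithic flow concrete, here is a rough Go sketch that PUTs an entire blob to the upload URL obtained when the upload was started. The helper name is hypothetical and the digest is assumed to have been computed by the client beforehand.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// monolithicUpload completes an upload in one request by PUTting the whole
// blob to the upload URL with the required digest parameter.
func monolithicUpload(uploadURL, digest string, blob []byte) error {
	sep := "?"
	if strings.Contains(uploadURL, "?") {
		sep = "&"
	}
	req, err := http.NewRequest(http.MethodPut,
		uploadURL+sep+"digest="+url.QueryEscape(digest), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.ContentLength = int64(len(blob))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("upload not accepted: %s", resp.Status)
	}
	return nil
}
```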
- -Additionally, the upload can be completed with a single `POST` request to -the uploads endpoint, including the "size" and "digest" parameters: - -``` -POST /v2//blobs/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Type: application/octet-stream - - -``` - -On the registry service, this should allocate a download, accept and verify -the data and return the same response as the final chunk of an upload. If the -POST request fails collecting the data in any way, the registry should attempt -to return an error response to the client with the `Location` header providing -a place to continue the download. - -The single `POST` method is provided for convenience and most clients should -implement `POST` + `PUT` to support reliable resume of uploads. - -##### Chunked Upload - -To carry out an upload of a chunk, the client can specify a range header and -only include that part of the layer file: - -``` -PATCH /v2//blobs/uploads/ -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -There is no enforcement on layer chunk splits other than that the server must -receive them in order. The server may enforce a minimum chunk size. If the -server cannot accept the chunk, a `416 Requested Range Not Satisfiable` -response will be returned and will include a `Range` header indicating the -current status: - -``` -416 Requested Range Not Satisfiable -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -If this response is received, the client should resume from the "last valid -range" and upload the subsequent chunk. A 416 will be returned under the -following conditions: - -- Invalid Content-Range header format -- Out of order chunk: the range of the next chunk must start immediately after - the "last valid range" from the previous response. - -When a chunk is accepted as part of the upload, a `202 Accepted` response will -be returned, including a `Range` header with the current upload status: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -##### Completed Upload - -For an upload to be considered complete, the client must submit a `PUT` -request on the upload endpoint with a digest parameter. If it is not provided, -the upload will not be considered complete. The format for the final chunk -will be as follows: - -``` -PUT /v2//blob/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -Optionally, if all chunks have already been uploaded, a `PUT` request with a -`digest` parameter and zero-length body may be sent to complete and validated -the upload. Multiple "digest" parameters may be provided with different -digests. The server may verify none or all of them but _must_ notify the -client if the content is rejected. - -When the last chunk is received and the layer has been validated, the client -will receive a `201 Created` response: - -``` -201 Created -Location: /v2//blobs/ -Content-Length: 0 -Docker-Content-Digest: -``` - -The `Location` header will contain the registry URL to access the accepted -layer file. The `Docker-Content-Digest` header returns the canonical digest of -the uploaded blob which may differ from the provided digest. Most clients may -ignore the value but if it is used, the client should verify the value against -the uploaded blob data. 
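For that verification step, a client can recompute the digest locally and compare it against the returned header. A minimal sketch, assuming plain sha256 content digests rather than tarsums:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// digestOf serializes a sha256 digest in the "<algorithm>:<hex>" form used
// throughout this specification.
func digestOf(blob []byte) string {
	sum := sha256.Sum256(blob)
	return "sha256:" + hex.EncodeToString(sum[:])
}

// verifyContentDigest compares a Docker-Content-Digest header value against
// the blob the client actually uploaded (or downloaded).
func verifyContentDigest(header string, blob []byte) error {
	if want := digestOf(blob); header != want {
		return fmt.Errorf("digest mismatch: registry returned %s, local content is %s", header, want)
	}
	return nil
}

func main() {
	fmt.Println(digestOf([]byte("a small string")))
}
```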
- -###### Digest Parameter - -The "digest" parameter is designed as an opaque parameter to support -verification of a successful transfer. The initial version of the registry API -will support a tarsum digest, in the standard tarsum format. For example, a -HTTP URI parameter might be as follows: - -``` -tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b -``` - -Given this parameter, the registry will verify that the provided content does -result in this tarsum. Optionally, the registry can support other other digest -parameters for non-tarfile content stored as a layer. A regular hash digest -might be specified as follows: - -``` -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b -``` - -Such a parameter would be used to verify that the binary content (as opposed -to the tar content) would be verified at the end of the upload process. - -For the initial version, registry servers are only required to support the -tarsum format. - -##### Canceling an Upload - -An upload can be cancelled by issuing a DELETE request to the upload endpoint. -The format will be as follows: - -``` -DELETE /v2//blobs/uploads/ -``` - -After this request is issued, the upload uuid will no longer be valid and the -registry server will dump all intermediate data. While uploads will time out -if not completed, clients should issue this request if they encounter a fatal -error but still have the ability to issue an http request. - -##### Errors - -If an 502, 503 or 504 error is received, the client should assume that the -download can proceed due to a temporary condition, honoring the appropriate -retry mechanism. Other 5xx errors should be treated as terminal. - -If there is a problem with the upload, a 4xx error will be returned indicating -the problem. After receiving a 4xx response (except 416, as called out above), -the upload will be considered failed and the client should take appropriate -action. - -Note that the upload url will not be available forever. If the upload uuid is -unknown to the registry, a `404 Not Found` response will be returned and the -client must restart the upload process. - -### Deleting a Layer - -A layer may be deleted from the registry via its `name` and `digest`. A -delete may be issued with the following request format: - - DELETE /v2//blobs/ - -If the blob exists and has been successfully deleted, the following response -will be issued: - - 202 Accepted - Content-Length: None - -If the blob had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -If a layer is deleted which is referenced by a manifest in the registry, -then the complete images will not be resolvable. - -#### Pushing an Image Manifest - -Once all of the layers for an image are uploaded, the client can upload the -image manifest. An image can be pushed using the following request format: - - PUT /v2//manifests/ - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": , - ... - } - -The `name` and `reference` fields of the response body must match those specified in -the URL. The `reference` field may be a "tag" or a "digest". - -If there is a problem with pushing the manifest, a relevant 4xx response will -be returned with a JSON error message. Please see the _PUT Manifest section -for details on possible error codes that may be returned. - -If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are -returned. 
The `detail` field of the error response will have a `digest` field -identifying the missing blob, which will be a tarsum. An error is returned for -each unknown blob. The response format is as follows: - - { - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": - } - }, - ... - ] - } - -### Listing Repositories - -Images are stored in collections, known as a _repository_, which is keyed by a -`name`, as seen throughout the API specification. A registry instance may -contain several repositories. The list of available repositories is made -available through the _catalog_. - -The catalog for a given registry can be retrieved with the following request: - -``` -GET /v2/_catalog -``` - -The response will be in the following format: - -``` -200 OK -Content-Type: application/json - -{ - "repositories": [ - , - ... - ] -} -``` - -Note that the contents of the response are specific to the registry -implementation. Some registries may opt to provide a full catalog output, -limit it based on the user's access level or omit upstream results, if -providing mirroring functionality. Subsequently, the presence of a repository -in the catalog listing only means that the registry *may* provide access to -the repository at the time of the request. Conversely, a missing entry does -*not* mean that the registry does not have the repository. More succinctly, -the presence of a repository only guarantees that it is there but not that it -is _not_ there. - -For registries with a large number of repositories, this response may be quite -large. If such a response is expected, one should use pagination. A registry -may also limit the amount of responses returned even if pagination was not -explicitly requested. In this case the `Link` header will be returned along -with the results, and subsequent results can be obtained by following the link -as if pagination had been initially requested. - -For details of the `Link` header, please see the _Pagination_ section. - -#### Pagination - -Paginated catalog results can be retrieved by adding an `n` parameter to the -request URL, declaring that the response should be limited to `n` results. -Starting a paginated flow begins as follows: - -``` -GET /v2/_catalog?n= -``` - -The above specifies that a catalog response should be returned, from the start of -the result set, ordered lexically, limiting the number of results to `n`. The -response to such a request would look as follows: - -``` -200 OK -Content-Type: application/json -Link: <?n=&last=>; rel="next" - -{ - "repositories": [ - , - ... - ] -} -``` - -The above includes the _first_ `n` entries from the result set. To get the -_next_ `n` entries, one can create a URL where the argument `last` has the -value from `repositories[len(repositories)-1]`. If there are indeed more -results, the URL for the next block is encoded in an -[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" -relation. The presence of the `Link` header communicates to the client that -the entire result set has not been returned and another request must be -issued. If the header is not present, the client can assume that all results -have been recieved. - -> __NOTE:__ In the request template above, note that the brackets -> are required. For example, if the url is -> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would -> be `; rel="next"`. Please see -> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. 
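A client-side sketch of that pagination loop in Go might look like the following. The Link parsing is deliberately simple and assumes a single "next" relation per response, as in the examples above; the registry address is illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strings"
)

// catalog walks the paginated /v2/_catalog results by following the Link
// header until the registry stops returning one.
func catalog(registry string, n int) ([]string, error) {
	var repos []string
	next := fmt.Sprintf("%s/v2/_catalog?n=%d", registry, n)
	for next != "" {
		resp, err := http.Get(next)
		if err != nil {
			return nil, err
		}
		var page struct {
			Repositories []string `json:"repositories"`
		}
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		repos = append(repos, page.Repositories...)

		next = ""
		if link := resp.Header.Get("Link"); link != "" {
			// e.g. Link: <http://example.com/v2/_catalog?n=2&last=b>; rel="next"
			if i, j := strings.Index(link, "<"), strings.Index(link, ">"); i >= 0 && j > i {
				next = link[i+1 : j]
				if strings.HasPrefix(next, "/") {
					next = registry + next
				}
			}
		}
	}
	return repos, nil
}

func main() {
	repos, err := catalog("http://localhost:5000", 100)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(repos)
}
```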
- -Compliant client implementations should always use the `Link` header -value when proceeding through results linearly. The client may construct URLs -to skip forward in the catalog. - -To get the next result set, a client would issue the request as follows, using -the URL encoded in the described `Link` header: - -``` -GET /v2/_catalog?n=&last= -``` - -The above process should then be repeated until the `Link` header is no longer -set. - -The catalog result set is represented abstractly as a lexically sorted list, -where the position in that list can be specified by the query term `last`. The -entries in the response start _after_ the term specified by `last`, up to `n` -entries. - -The behavior of `last` is quite simple when demonstrated with an example. Let -us say the registry has the following repositories: - -``` -a -b -c -d -``` - -If the value of `n` is 2, _a_ and _b_ will be returned on the first response. -The `Link` header returned on the response will have `n` set to 2 and last set -to _b_: - -``` -Link: <?n=2&last=b>; rel="next" -``` - -The client can then issue the request with above value from the `Link` header, -receiving the values _c_ and _d_. Note that n may change on second to last -response or be omitted fully, if the server may so choose. - -### Listing Image Tags - -It may be necessary to list all of the tags under a given repository. The tags -for an image repository can be retrieved with the following request: - - GET /v2//tags/list - -The response will be in the following format: - - 200 OK - Content-Type: application/json - - { - "name": , - "tags": [ - , - ... - ] - } - -For repositories with a large number of tags, this response may be quite -large. If such a response is expected, one should use the pagination. - -#### Pagination - -Paginated tag results can be retrieved by adding the appropriate parameters to -the request URL described above. The behavior of tag pagination is identical -to that specified for catalog pagination. We cover a simple flow to highlight -any differences. - -Starting a paginated flow may begin as follows: - -``` -GET /v2//tags/list?n= -``` - -The above specifies that a tags response should be returned, from the start of -the result set, ordered lexically, limiting the number of results to `n`. The -response to such a request would look as follows: - -``` -200 OK -Content-Type: application/json -Link: <?n=&last=>; rel="next" - -{ - "name": , - "tags": [ - , - ... - ] -} -``` - -To get the next result set, a client would issue the request as follows, using -the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` -header: - -``` -GET /v2//tags/list?n=&last= -``` - -The above process should then be repeated until the `Link` header is no longer -set in the response. The behavior of the `last` parameter, the provided -response result, lexical ordering and encoding of the `Link` header are -identical to that of catalog pagination. - -### Deleting an Image - -An image may be deleted from the registry via its `name` and `reference`. A -delete may be issued with the following request format: - - DELETE /v2//manifests/ - -For deletes, `reference` *must* be a digest or the delete will fail. If the -image exists and has been successfully deleted, the following response will be -issued: - - 202 Accepted - Content-Length: None - -If the image had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -## Detail - -> **Note**: This section is still under construction. 
## Detail

> **Note**: This section is still under construction. For the purposes of
> implementation, if any details below differ from the described request flows
> above, the section below should be corrected. When they match, this note
> should be removed.

The behavior of the endpoints is covered in detail in this section, organized
by route and entity. All aspects of the request and responses are covered,
including headers, parameters and body formats. Examples of requests and their
corresponding responses, with success and failure, are enumerated.

> **Note**: The sections on endpoint detail are arranged with an example
> request, a description of the request, followed by information about that
> request.

A list of methods and URIs is covered in the table below:

|Method|Path|Entity|Description|
|------|----|------|-----------|
| GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. |
| GET | `/v2/<name>/tags/list` | Tags | Fetch the tags under the repository identified by `name`. |
| GET | `/v2/<name>/manifests/<reference>` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
| PUT | `/v2/<name>/manifests/<reference>` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. |
| DELETE | `/v2/<name>/manifests/<reference>` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. |
| GET | `/v2/<name>/blobs/<digest>` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. |
| DELETE | `/v2/<name>/blobs/<digest>` | Blob | Delete the blob identified by `name` and `digest`. |
| POST | `/v2/<name>/blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. |
| GET | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. |
| PATCH | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Upload a chunk of data for the specified upload. |
| PUT | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. |
| DELETE | `/v2/<name>/blobs/uploads/<uuid>` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out. |
| GET | `/v2/_catalog` | Catalog | Retrieve a sorted, JSON list of repositories available in the registry. |


The detail for each endpoint is covered in the following sections.

### Errors

The error codes encountered via the API are enumerated in the following table:

|Code|Message|Description|
|----|-------|-----------|
| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. |
| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. |
| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. |
| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. |
| `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. |
| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information about the failed validation. |
| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag, is unknown to the repository. |
| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. |
| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. |
| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. |
| `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. |
| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the URI tag, this error will be returned. |
| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. |
| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. |
| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. |



### Base

Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.



#### GET Base

Check that the endpoint implements Docker Registry API V2.



```
GET /v2/
Host: <registry host>
Authorization: <scheme> <token>
```




The following parameters should be specified on the request:

|Name|Kind|Description|
|----|----|-----------|
|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.|
|`Authorization`|header|An RFC7235 compliant authorization header.|




###### On Success: OK

```
200 OK
```

The API implements the V2 protocol and is accessible.




###### On Failure: Not Found

```
404 Not Found
```

The registry does not implement the V2 API.



###### On Failure: Authentication Required

```
401 Unauthorized
WWW-Authenticate: <scheme> realm="<realm>", ...
Content-Length: <length>
Content-Type: application/json; charset=utf-8

{
    "errors": [
        {
            "code": <error code>,
            "message": "<error message>",
            "detail": ...
        },
        ...
    ]
}
```

The client is not authenticated.
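As a purely illustrative aside, a minimal client-side version check might look
like the Go sketch below: a `200` confirms V2 support, while a `401` carries a
`WWW-Authenticate` challenge the client must satisfy before retrying. The
registry URL is a placeholder, and actually obtaining a token is out of scope
here.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	registry := "https://registry.example.com" // hypothetical registry host

	resp, err := http.Get(registry + "/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		fmt.Println("V2 API supported, no authentication required")
	case http.StatusUnauthorized:
		// e.g. Bearer realm="https://auth.example.com/token",service="registry.example.com"
		fmt.Println("authenticate first:", resp.Header.Get("WWW-Authenticate"))
	case http.StatusNotFound:
		fmt.Println("registry does not implement the V2 API")
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}
```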
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - - - -### Tags - -Retrieve information about tags. - - - -#### GET Tags - -Fetch the tags under the repository identified by `name`. - - -##### Tags - -``` -GET /v2//tags/list -Host: -Authorization: -``` - -Return all tags for the repository - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tags": [ - , - ... - ] -} -``` - -A list of tags for the named repository. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Tags Paginated - -``` -GET /v2//tags/list?n=&last= -``` - -Return a portion of the tags for the specified repository. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`name`|path|Name of the target repository.| -|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| -|`last`|query|Result set will include values lexically after last.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Link: <?n=&last=>; rel="next" -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tags": [ - , - ... - ], -} -``` - -A list of tags for the named repository. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| -|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| - - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - - -### Manifest - -Create, update, delete and retrieve manifests. - - - -#### GET Manifest - -Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. - - - -``` -GET /v2//manifests/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`reference`|path|Tag or digest of the target manifest.| - - - - -###### On Success: OK - -``` -200 OK -Docker-Content-Digest: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -} -``` - -The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The name or reference was invalid. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### PUT Manifest - -Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. - - - -``` -PUT /v2//manifests/ -Host: -Authorization: -Content-Type: application/json; charset=utf-8 - -{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -} -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`reference`|path|Tag or digest of the target manifest.| - - - - -###### On Success: Created - -``` -201 Created -Location: -Content-Length: 0 -Docker-Content-Digest: -``` - -The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The canonical location url of the uploaded manifest.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Invalid Manifest - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | -| `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. 
| -| `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -###### On Failure: Missing Layer(s) - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -} -``` - -One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. 
| - - - -###### On Failure: Not allowed - -``` -405 Method Not Allowed -``` - -Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - - -#### DELETE Manifest - -Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. - - - -``` -DELETE /v2//manifests/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`reference`|path|Tag or digest of the target manifest.| - - - - -###### On Success: Accepted - -``` -202 Accepted -``` - - - - - - -###### On Failure: Invalid Name or Reference - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The specified `name` or `reference` were invalid and the delete was unable to proceed. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -###### On Failure: Unknown Manifest - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | - - - -###### On Failure: Not allowed - -``` -405 Method Not Allowed -``` - -Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - - - -### Blob - -Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest. - - - -#### GET Blob - -Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. - - -##### Fetch Blob - -``` -GET /v2//blobs/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`digest`|path|Digest of desired blob.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Docker-Content-Digest: -Content-Type: application/octet-stream - - -``` - -The blob identified by `digest` is available. The blob content will be present in the body of the request. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The length of the requested blob content.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - -###### On Success: Temporary Redirect - -``` -307 Temporary Redirect -Location: -Docker-Content-Digest: -``` - -The blob identified by `digest` is available at the provided location. 
- -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location where the layer should be accessible.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The blob, identified by `name` and `digest`, is unknown to the registry. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Fetch Blob Part - -``` -GET /v2//blobs/ -Host: -Authorization: -Range: bytes=- -``` - -This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Range`|header|HTTP Range header specifying blob chunk.| -|`name`|path|Name of the target repository.| -|`digest`|path|Digest of desired blob.| - - - - -###### On Success: Partial Content - -``` -206 Partial Content -Content-Length: -Content-Range: bytes -/ -Content-Type: application/octet-stream - - -``` - -The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The length of the requested blob chunk.| -|`Content-Range`|Content range of blob chunk.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... 
- ] -} -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Requested Range Not Satisfiable - -``` -416 Requested Range Not Satisfiable -``` - -The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content. - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### DELETE Blob - -Delete the blob identified by `name` and `digest` - - - -``` -DELETE /v2//blobs/ -Host: -Authorization: -``` - - - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`digest`|path|Digest of desired blob.| - - - - -###### On Success: Accepted - -``` -202 Accepted -Content-Length: 0 -Docker-Content-Digest: -``` - - - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|0| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The blob, identified by `name` and `digest`, is unknown to the registry. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | -| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | - - - -###### On Failure: Method Not Allowed - -``` -405 Method Not Allowed -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. 
| - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - - -### Initiate Blob Upload - -Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads. - - - -#### POST Initiate Blob Upload - -Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. - - -##### Initiate Monolithic Blob Upload - -``` -POST /v2//blobs/uploads/?digest= -Host: -Authorization: -Content-Length: -Content-Type: application/octect-stream - - -``` - -Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|| -|`name`|path|Name of the target repository.| -|`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.| - - - - -###### On Success: Created - -``` -201 Created -Location: -Content-Length: 0 -Docker-Upload-UUID: -``` - -The blob has been created in the registry and is available at the provided location. 
- -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Not allowed - -``` -405 Method Not Allowed -``` - -Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Initiate Resumable Blob Upload - -``` -POST /v2//blobs/uploads/ -Host: -Authorization: -Content-Length: 0 -``` - -Initiate a resumable blob upload with an empty request body. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| -|`name`|path|Name of the target repository.| - - - - -###### On Success: Accepted - -``` -202 Accepted -Content-Length: 0 -Location: /v2//blobs/uploads/ -Range: 0-0 -Docker-Upload-UUID: -``` - -The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Invalid Name or Digest - -``` -400 Bad Request -``` - - - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. 
Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - - -### Blob Upload - -Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls. - - - -#### GET Blob Upload - -Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. - - - -``` -GET /v2//blobs/uploads/ -Host: -Authorization: -``` - -Retrieve the progress of the current upload, as reported by the `Range` header. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Upload Progress - -``` -204 No Content -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The upload is known and in progress. The last received offset is available in the `Range` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. 
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### PATCH Blob Upload - -Upload a chunk of data for the specified upload. - - -##### Stream upload - -``` -PATCH /v2//blobs/uploads/ -Host: -Authorization: -Content-Type: application/octet-stream - - -``` - -Upload a stream of data to upload without completing the upload. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Data Accepted - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. 
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - -##### Chunked upload - -``` -PATCH /v2//blobs/uploads/ -Host: -Authorization: -Content-Range: - -Content-Length: -Content-Type: application/octet-stream - - -``` - -Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. 
Note that this is a non-standard use of the `Content-Range` header.| -|`Content-Length`|header|Length of the chunk being uploaded, corresponding the length of the request body.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Chunk Accepted - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| -|`Range`|Range indicating the current progress of the upload.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Requested Range Not Satisfiable - -``` -416 Requested Range Not Satisfiable -``` - -The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### PUT Blob Upload - -Complete the upload specified by `uuid`, optionally appending the body as the final chunk. - - - -``` -PUT /v2//blobs/uploads/?digest= -Host: -Authorization: -Content-Length: -Content-Type: application/octet-stream - - -``` - -Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| -|`digest`|query|Digest of uploaded blob.| - - - - -###### On Success: Upload Complete - -``` -204 No Content -Location: -Content-Range: - -Content-Length: 0 -Docker-Content-Digest: -``` - -The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header. 
- -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Location`|The canonical location of the blob for retrieval| -|`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| -|`Docker-Content-Digest`|Digest of the targeted content for the request.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -There was an error processing the upload and it must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | -| `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The upload must be restarted. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. 
- -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - -#### DELETE Blob Upload - -Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. - - - -``` -DELETE /v2//blobs/uploads/ -Host: -Authorization: -Content-Length: 0 -``` - -Cancel the upload specified by `uuid`. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| -|`Authorization`|header|An RFC7235 compliant authorization header.| -|`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| -|`name`|path|Name of the target repository.| -|`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| - - - - -###### On Success: Upload Deleted - -``` -204 No Content -Content-Length: 0 -``` - -The upload has been successfully deleted. - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| - - - - -###### On Failure: Bad Request - -``` -400 Bad Request -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -An error was encountered processing the delete. The client may ignore this error. - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | -| `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | - - - -###### On Failure: Not Found - -``` -404 Not Found -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. 
- - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | - - - -###### On Failure: Authentication Required - -``` -401 Unauthorized -WWW-Authenticate: realm="", ..." -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client is not authenticated. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | - - - -###### On Failure: No Such Repository Error - -``` -404 Not Found -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The repository is not known to the registry. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | - - - -###### On Failure: Access Denied - -``` -403 Forbidden -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -} -``` - -The client does not have required access to the repository. - -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -| `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | - - - - - -### Catalog - -List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available. - - - -#### GET Catalog - -Retrieve a sorted, json list of repositories available in the registry. - - -##### Catalog Fetch Complete - -``` -GET /v2/_catalog -``` - -Request an unabridged list of repositories available. - - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Content-Type: application/json; charset=utf-8 - -{ - "repositories": [ - , - ... - ] -} -``` - -Returns the unabridged list of repositories as a json response. 
- -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| - - - -##### Catalog Fetch Paginated - -``` -GET /v2/_catalog?n=&last= -``` - -Return the specified portion of repositories. - - -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -|`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| -|`last`|query|Result set will include values lexically after last.| - - - - -###### On Success: OK - -``` -200 OK -Content-Length: -Link: <?n=&last=>; rel="next" -Content-Type: application/json; charset=utf-8 - -{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -} -``` - - - -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -|`Content-Length`|Length of the JSON response body.| -|`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| - - - - - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl deleted file mode 100644 index cfb019043bc2..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/api.md.tmpl +++ /dev/null @@ -1,1126 +0,0 @@ - - -# Docker Registry HTTP API V2 - -## Introduction - -The _Docker Registry HTTP API_ is the protocol to facilitate distribution of -images to the docker engine. It interacts with instances of the docker -registry, which is a service to manage information about docker images and -enable their distribution. The specification covers the operation of version 2 -of this API, known as _Docker Registry HTTP API V2_. - -While the V1 registry protocol is usable, there are several problems with the -architecture that have led to this new version. The main driver of this -specification these changes to the docker the image format, covered in -[docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image -definition and improves security. This specification will build on that work, -leveraging new properties of the manifest format to improve performance, -reduce bandwidth usage and decrease the likelihood of backend corruption. - -For relevant details and history leading up to this specification, please see -the following issues: - -- [docker/docker#8093](https://github.com/docker/docker/issues/8093) -- [docker/docker#9015](https://github.com/docker/docker/issues/9015) -- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) - -### Scope - -This specification covers the URL layout and protocols of the interaction -between docker registry and docker core. This will affect the docker core -registry API and the rewrite of docker-registry. Docker registry -implementations may implement other API endpoints, but they are not covered by -this specification. - -This includes the following features: - -- Namespace-oriented URI Layout -- PUSH/PULL registry server for V2 image manifest format -- Resumable layer PUSH support -- V2 Client library implementation - -While authentication and authorization support will influence this -specification, details of the protocol will be left to a future specification. -Relevant header definitions and error codes are present to provide an -indication of what a client may encounter. 
- -#### Future - -There are features that have been discussed during the process of cutting this -specification. The following is an incomplete list: - -- Immutable image references -- Multiple architecture support -- Migration from v2compatibility representation - -These may represent features that are either out of the scope of this -specification, the purview of another specification or have been deferred to a -future version. - -### Use Cases - -For the most part, the use cases of the former registry API apply to the new -version. Differentiating use cases are covered below. - -#### Image Verification - -A docker engine instance would like to run verified image named -"library/ubuntu", with the tag "latest". The engine contacts the registry, -requesting the manifest for "library/ubuntu:latest". An untrusted registry -returns a manifest. Before proceeding to download the individual layers, the -engine verifies the manifest's signature, ensuring that the content was -produced from a trusted source and no tampering has occured. After each layer -is downloaded, the engine verifies the digest of the layer, ensuring that the -content matches that specified by the manifest. - -#### Resumable Push - -Company X's build servers lose connectivity to docker registry before -completing an image layer transfer. After connectivity returns, the build -server attempts to re-upload the image. The registry notifies the build server -that the upload has already been partially attempted. The build server -responds by only sending the remaining data to complete the image file. - -#### Resumable Pull - -Company X is having more connectivity problems but this time in their -deployment datacenter. When downloading an image, the connection is -interrupted before completion. The client keeps the partial data and uses http -`Range` requests to avoid downloading repeated data. - -#### Layer Upload De-duplication - -Company Y's build system creates two identical docker layers from build -processes A and B. Build process A completes uploading the layer before B. -When process B attempts to upload the layer, the registry indicates that its -not necessary because the layer is already known. - -If process A and B upload the same layer at the same time, both operations -will proceed and the first to complete will be stored in the registry (Note: -we may modify this to prevent dogpile with some locking mechanism). - -### Changes - -The V2 specification has been written to work as a living document, specifying -only what is certain and leaving what is not specified open or to future -changes. Only non-conflicting additions should be made to the API and accepted -changes should avoid preventing future changes from happening. - -This section should be updated when changes are made to the specification, -indicating what is different. Optionally, we may start marking parts of the -specification to correspond with the versions enumerated here. - -Each set of changes is given a letter corresponding to a set of modifications -that were applied to the baseline specification. These are merely for -reference and shouldn't be used outside the specification other than to -identify a set of modifications. - -
- **f**
  - Specify the delete API for layers and manifests.

- **e**
  - Added support for listing registry contents.
  - Added pagination to tags API.
  - Added common approach to support pagination.

- **d**
  - Allow repository name components to be one character.
  - Clarified that single component names are allowed.

- **c**
  - Added section covering digest format.
  - Added more clarification that manifest cannot be deleted by tag.

- **b**
  - Added capability of doing streaming upload to PATCH blob upload.
  - Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.

- **a**
  - Added support for immutable manifest references in manifest endpoints.
  - Deleting a manifest by tag has been deprecated.
  - Specified `Docker-Content-Digest` header for appropriate entities.
  - Added error code for unsupported operations.
- -## Overview - -This section covers client flows and details of the API endpoints. The URI -layout of the new API is structured to support a rich authentication and -authorization model by leveraging namespaces. All endpoints will be prefixed -by the API version and the repository name: - - /v2// - -For example, an API endpoint that will work with the `library/ubuntu` -repository, the URI prefix will be: - - /v2/library/ubuntu/ - -This scheme provides rich access control over various operations and methods -using the URI prefix and http methods that can be controlled in variety of -ways. - -Classically, repository names have always been two path components where each -path component is less than 30 characters. The V2 registry API does not -enforce this. The rules for a repository name are as follows: - -1. A repository name is broken up into _path components_. A component of a - repository name must be at least one lowercase, alpha-numeric characters, - optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. -2. If a repository name has two or more path components, they must be - separated by a forward slash ("/"). -3. The total length of a repository name, including slashes, must be less the - 256 characters. - -These name requirements _only_ apply to the registry API and should accept a -superset of what is supported by other docker ecosystem components. - -All endpoints should support aggressive http caching, compression and range -headers, where appropriate. The new API attempts to leverage HTTP semantics -where possible but may break from standards to implement targeted features. - -For detail on individual endpoints, please see the [_Detail_](#detail) -section. - -### Errors - -Actionable failure conditions, covered in detail in their relevant sections, -are reported as part of 4xx responses, in a json response body. One or more -errors will be returned in the following format: - - { - "errors:" [{ - "code": , - "message": , - "detail": - }, - ... - ] - } - -The `code` field will be a unique identifier, all caps with underscores by -convention. The `message` field will be a human readable string. The optional -`detail` field may contain arbitrary json data providing information the -client can use to resolve the issue. - -While the client can take action on certain error codes, the registry may add -new error codes over time. All client implementations should treat unknown -error codes as `UNKNOWN`, allowing future error codes to be added without -breaking API compatibility. For the purposes of the specification error codes -will only be added and never removed. - -For a complete account of all error codes, please see the _Detail_ section. - -### API Version Check - -A minimal endpoint, mounted at `/v2/` will provide version support information -based on its response statuses. The request format is as follows: - - GET /v2/ - -If a `200 OK` response is returned, the registry implements the V2(.1) -registry API and the client may proceed safely with other V2 operations. -Optionally, the response may contain information about the supported paths in -the response body. The client should be prepared to ignore this data. - -If a `401 Unauthorized` response is returned, the client should take action -based on the contents of the "WWW-Authenticate" header and try the endpoint -again. 
Depending on access control setup, the client may still have to -authenticate against different resources, even if this check succeeds. - -If `404 Not Found` response status, or other unexpected status, is returned, -the client should proceed with the assumption that the registry does not -implement V2 of the API. - -When a `200 OK` or `401 Unauthorized` response is returned, the -"Docker-Distribution-API-Version" header should be set to "registry/2.0". -Clients may require this header value to determine if the endpoint serves this -API. When this header is omitted, clients may fallback to an older API version. - -### Content Digests - -This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). -The core of this design is the concept of a content addressable identifier. It -uniquely identifies content by taking a collision-resistant hash of the bytes. -Such an identifier can be independently calculated and verified by selection -of a common _algorithm_. If such an identifier can be communicated in a secure -manner, one can retrieve the content from an insecure source, calculate it -independently and be certain that the correct content was obtained. Put simply, -the identifier is a property of the content. - -To disambiguate from other concepts, we call this identifier a _digest_. A -_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ -portion. The _algorithm_ identifies the methodology used to calculate the -digest. The _hex_ portion is the hex-encoded result of the hash. - -We define a _digest_ string to match the following grammar: -``` -digest := algorithm ":" hex -algorithm := /[A-Fa-f0-9_+.-]+/ -hex := /[A-Fa-f0-9]+/ -``` - -Some examples of _digests_ include the following: - -digest | description | -----------------------------------------------------------------------------------|------------------------------------------------ -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | -tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Tarsum digest, used for legacy layer digests. | - -> __NOTE:__ While we show an example of using a `tarsum` digest, the security -> of tarsum has not been verified. It is recommended that most implementations -> use sha256 for interoperability. - -While the _algorithm_ does allow one to implement a wide variety of -algorithms, compliant implementations should use sha256. Heavy processing of -input before calculating a hash is discouraged to avoid degrading the -uniqueness of the _digest_ but some canonicalization may be performed to -ensure consistent identifiers. - -Let's use a simple example in pseudo-code to demonstrate a digest calculation: -``` -let C = 'a small string' -let B = sha256(C) -let D = 'sha256:' + EncodeHex(B) -let ID(C) = D -``` - -Above, we have bytestring `C` passed into a function, `SHA256`, that returns a -bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated -with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` -as equal to `D`. A digest can be verified by independently calculating `D` and -comparing it with identifier `ID(C)`. - -#### Digest Header - -To provide verification of http content, any response may include a `Docker- -Content-Digest` header. This will include the digest of the target entity -returned in the response. For blobs, this is the entire blob content. 
For -manifests, this is the manifest body without the signature content, also known -as the JWS payload. Note that the commonly used canonicalization for digest -calculation may be dependent on the mediatype of the content, such as with -manifests. - -The client may choose to ignore the header or may verify it to ensure content -integrity and transport security. This is most important when fetching by a -digest. To ensure security, the content should be verified against the digest -used to fetch the content. At times, the returned digest may differ from that -used to initiate a request. Such digests are considered to be from different -_domains_, meaning they have different values for _algorithm_. In such a case, -the client may choose to verify the digests in both domains or ignore the -server's digest. To maintain security, the client _must_ always verify the -content against the _digest_ used to fetch the content. - -> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use -> the same digest used to fetch the content to verify it. The header `Docker- -> Content-Digest` should not be trusted over the "local" digest. - -### Pulling An Image - -An "image" is a combination of a JSON manifest and individual layer files. The -process of pulling an image centers around retrieving these two components. - -The first step in pulling an image is to retrieve the manifest. For reference, -the relevant manifest fields for the registry are the following: - - field | description | -----------|------------------------------------------------| -name | The name of the image. | -tag | The tag for this version of the image. | -fsLayers | A list of layer descriptors (including tarsum) | -signature | A JWS used to verify the manifest content | - -For more information about the manifest format, please see -[docker/docker#8093](https://github.com/docker/docker/issues/8093). - -When the manifest is in hand, the client must verify the signature to ensure -the names and layers are valid. Once confirmed, the client will then use the -tarsums to download the individual layers. Layers are stored in as blobs in -the V2 registry API, keyed by their tarsum digest. - -#### Pulling an Image Manifest - -The image manifest can be fetched with the following url: - -``` -GET /v2//manifests/ -``` - -The `name` and `reference` parameter identify the image and are required. The -reference may include a tag or digest. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful, the image -manifest will be returned, with the following format (see docker/docker#8093 -for details): - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": - } - -The client should verify the returned manifest signature for authenticity -before fetching layers. - -#### Pulling a Layer - -Layers are stored in the blob portion of the registry, keyed by tarsum digest. -Pulling a layer is carried out by a standard http request. The URL is as -follows: - - GET /v2//blobs/ - -Access to a layer will be gated by the `name` of the repository but is -identified uniquely in the registry by `tarsum`. The `tarsum` parameter is an -opaque field, to be interpreted by the tarsum library. - -This endpoint may issue a 307 (302 for /blobs/uploads/ -``` - -The parameters of this request are the image namespace under which the layer -will be linked. Responses to this request are covered below. 
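
The request above can be illustrated with a short, non-normative Go sketch. The registry address and repository name below are placeholders rather than part of the specification, and error handling is kept to a minimum:

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical registry and repository, used only for illustration.
	registry := "https://registry.example.com"
	name := "library/ubuntu"

	// POST /v2/<name>/blobs/uploads/ with an empty body starts a resumable upload.
	resp, err := http.Post(registry+"/v2/"+name+"/blobs/uploads/", "", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusAccepted {
		panic(fmt.Sprintf("unexpected status: %s", resp.Status))
	}

	// The Location header is the opaque upload URL; clients should use it
	// verbatim for all subsequent upload requests.
	uploadURL := resp.Header.Get("Location")
	uploadUUID := resp.Header.Get("Docker-Upload-UUID")
	fmt.Println("upload URL:", uploadURL, "uuid:", uploadUUID)
}
```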
- -##### Existing Layers - -The existence of a layer can be checked via a `HEAD` request to the blob store -API. The request should be formatted as follows: - -``` -HEAD /v2//blobs/ -``` - -If the layer with the tarsum specified in `digest` is available, a 200 OK -response will be received, with no actual body content (this is according to -http specification). The response will look as follows: - -``` -200 OK -Content-Length: -Docker-Content-Digest: -``` - -When this response is received, the client can assume that the layer is -already available in the registry under the given name and should take no -further action to upload the layer. Note that the binary digests may differ -for the existing registry layer, but the tarsums will be guaranteed to match. - -##### Uploading the Layer - -If the POST request is successful, a `202 Accepted` response will be returned -with the upload URL in the `Location` header: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The rest of the upload process can be carried out with the returned url, -called the "Upload URL" from the `Location` header. All responses to the -upload url, whether sending data or getting status, will be in this format. -Though the URI format (`/v2//blobs/uploads/`) for the `Location` -header is specified, clients should treat it as an opaque url and should never -try to assemble the it. While the `uuid` parameter may be an actual UUID, this -proposal imposes no constraints on the format and clients should never impose -any. - -If clients need to correlate local upload state with remote upload state, the -contents of the `Docker-Upload-UUID` header should be used. Such an id can be -used to key the last used location header when implementing resumable uploads. - -##### Upload Progress - -The progress and chunk coordination of the upload process will be coordinated -through the `Range` header. While this is a non-standard use of the `Range` -header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. -For an upload that just started, for an example with a 1000 byte layer file, -the `Range` header would be as follows: - -``` -Range: bytes=0-0 -``` - -To get the status of an upload, issue a GET request to the upload URL: - -``` -GET /v2//blobs/uploads/ -Host: -``` - -The response will be similar to the above, except will return 204 status: - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: bytes=0- -Docker-Upload-UUID: -``` - -Note that the HTTP `Range` header byte ranges are inclusive and that will be -honored, even in non-standard use cases. - -##### Monolithic Upload - -A monolithic upload is simply a chunked upload with a single chunk and may be -favored by clients that would like to avoided the complexity of chunking. To -carry out a "monolithic" upload, one can simply put the entire content blob to -the provided URL: - -``` -PUT /v2//blobs/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Type: application/octet-stream - - -``` - -The "digest" parameter must be included with the PUT request. Please see the -_Completed Upload_ section for details on the parameters and expected -responses. 
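
A non-normative Go sketch of such a monolithic upload follows; it assumes the upload URL is the absolute form of the `Location` value returned when the upload was started:

```
package client

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
)

// MonolithicUpload PUTs an entire blob to the upload URL returned in a prior
// Location header, passing the blob digest as the "digest" query parameter.
func MonolithicUpload(uploadURL string, layer []byte) error {
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(layer))

	// Merge the digest into any query parameters already present on the URL.
	u, err := url.Parse(uploadURL)
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", digest)
	u.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewReader(layer))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// The Completed Upload section below describes a 201 Created on success,
	// while the endpoint detail section uses 204 No Content; accept any 2xx.
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return fmt.Errorf("upload rejected: %s", resp.Status)
	}
	return nil
}
```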
- -Additionally, the upload can be completed with a single `POST` request to -the uploads endpoint, including the "size" and "digest" parameters: - -``` -POST /v2//blobs/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Type: application/octet-stream - - -``` - -On the registry service, this should allocate a download, accept and verify -the data and return the same response as the final chunk of an upload. If the -POST request fails collecting the data in any way, the registry should attempt -to return an error response to the client with the `Location` header providing -a place to continue the download. - -The single `POST` method is provided for convenience and most clients should -implement `POST` + `PUT` to support reliable resume of uploads. - -##### Chunked Upload - -To carry out an upload of a chunk, the client can specify a range header and -only include that part of the layer file: - -``` -PATCH /v2//blobs/uploads/ -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -There is no enforcement on layer chunk splits other than that the server must -receive them in order. The server may enforce a minimum chunk size. If the -server cannot accept the chunk, a `416 Requested Range Not Satisfiable` -response will be returned and will include a `Range` header indicating the -current status: - -``` -416 Requested Range Not Satisfiable -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -If this response is received, the client should resume from the "last valid -range" and upload the subsequent chunk. A 416 will be returned under the -following conditions: - -- Invalid Content-Range header format -- Out of order chunk: the range of the next chunk must start immediately after - the "last valid range" from the previous response. - -When a chunk is accepted as part of the upload, a `202 Accepted` response will -be returned, including a `Range` header with the current upload status: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -##### Completed Upload - -For an upload to be considered complete, the client must submit a `PUT` -request on the upload endpoint with a digest parameter. If it is not provided, -the upload will not be considered complete. The format for the final chunk -will be as follows: - -``` -PUT /v2//blob/uploads/?digest=[&digest=sha256:] -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -Optionally, if all chunks have already been uploaded, a `PUT` request with a -`digest` parameter and zero-length body may be sent to complete and validated -the upload. Multiple "digest" parameters may be provided with different -digests. The server may verify none or all of them but _must_ notify the -client if the content is rejected. - -When the last chunk is received and the layer has been validated, the client -will receive a `201 Created` response: - -``` -201 Created -Location: /v2//blobs/ -Content-Length: 0 -Docker-Content-Digest: -``` - -The `Location` header will contain the registry URL to access the accepted -layer file. The `Docker-Content-Digest` header returns the canonical digest of -the uploaded blob which may differ from the provided digest. Most clients may -ignore the value but if it is used, the client should verify the value against -the uploaded blob data. 
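
The chunked flow described above might be driven from Go roughly as follows; this is a non-normative sketch that assumes an absolute upload URL and omits retry and `416` handling for brevity:

```
package client

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// ChunkedUpload PATCHes a blob to an existing upload URL in fixed-size chunks,
// then completes the upload with a zero-length PUT carrying the digest.
func ChunkedUpload(uploadURL string, blob []byte, chunkSize int) error {
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))

	for offset := 0; offset < len(blob); offset += chunkSize {
		end := offset + chunkSize
		if end > len(blob) {
			end = len(blob)
		}
		chunk := blob[offset:end]

		req, err := http.NewRequest(http.MethodPatch, uploadURL, bytes.NewReader(chunk))
		if err != nil {
			return err
		}
		// Inclusive byte offsets, per the non-standard Content-Range use above.
		req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", offset, end-1))
		req.Header.Set("Content-Type", "application/octet-stream")

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
		// The narrative above describes 202 Accepted; the endpoint detail
		// section uses 204 No Content.
		if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusNoContent {
			return fmt.Errorf("chunk rejected: %s", resp.Status)
		}
		// Always follow the most recently returned upload location.
		if loc := resp.Header.Get("Location"); loc != "" {
			uploadURL = loc
		}
	}

	// Complete the upload with a zero-length body and the digest parameter,
	// preserving any query parameters already present in the upload URL.
	u, err := url.Parse(uploadURL)
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", digest)
	u.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodPut, u.String(), nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("completing upload failed: %s", resp.Status)
	}
	return nil
}
```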
- -###### Digest Parameter - -The "digest" parameter is designed as an opaque parameter to support -verification of a successful transfer. The initial version of the registry API -will support a tarsum digest, in the standard tarsum format. For example, a -HTTP URI parameter might be as follows: - -``` -tarsum.v1+sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b -``` - -Given this parameter, the registry will verify that the provided content does -result in this tarsum. Optionally, the registry can support other other digest -parameters for non-tarfile content stored as a layer. A regular hash digest -might be specified as follows: - -``` -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b -``` - -Such a parameter would be used to verify that the binary content (as opposed -to the tar content) would be verified at the end of the upload process. - -For the initial version, registry servers are only required to support the -tarsum format. - -##### Canceling an Upload - -An upload can be cancelled by issuing a DELETE request to the upload endpoint. -The format will be as follows: - -``` -DELETE /v2//blobs/uploads/ -``` - -After this request is issued, the upload uuid will no longer be valid and the -registry server will dump all intermediate data. While uploads will time out -if not completed, clients should issue this request if they encounter a fatal -error but still have the ability to issue an http request. - -##### Errors - -If an 502, 503 or 504 error is received, the client should assume that the -download can proceed due to a temporary condition, honoring the appropriate -retry mechanism. Other 5xx errors should be treated as terminal. - -If there is a problem with the upload, a 4xx error will be returned indicating -the problem. After receiving a 4xx response (except 416, as called out above), -the upload will be considered failed and the client should take appropriate -action. - -Note that the upload url will not be available forever. If the upload uuid is -unknown to the registry, a `404 Not Found` response will be returned and the -client must restart the upload process. - -### Deleting a Layer - -A layer may be deleted from the registry via its `name` and `digest`. A -delete may be issued with the following request format: - - DELETE /v2//blobs/ - -If the blob exists and has been successfully deleted, the following response -will be issued: - - 202 Accepted - Content-Length: None - -If the blob had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -If a layer is deleted which is referenced by a manifest in the registry, -then the complete images will not be resolvable. - -#### Pushing an Image Manifest - -Once all of the layers for an image are uploaded, the client can upload the -image manifest. An image can be pushed using the following request format: - - PUT /v2//manifests/ - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": , - ... - } - -The `name` and `reference` fields of the response body must match those specified in -the URL. The `reference` field may be a "tag" or a "digest". - -If there is a problem with pushing the manifest, a relevant 4xx response will -be returned with a JSON error message. Please see the _PUT Manifest section -for details on possible error codes that may be returned. - -If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are -returned. 
The `detail` field of the error response will have a `digest` field -identifying the missing blob, which will be a tarsum. An error is returned for -each unknown blob. The response format is as follows: - - { - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": - } - }, - ... - ] - } - -### Listing Repositories - -Images are stored in collections, known as a _repository_, which is keyed by a -`name`, as seen throughout the API specification. A registry instance may -contain several repositories. The list of available repositories is made -available through the _catalog_. - -The catalog for a given registry can be retrieved with the following request: - -``` -GET /v2/_catalog -``` - -The response will be in the following format: - -``` -200 OK -Content-Type: application/json - -{ - "repositories": [ - , - ... - ] -} -``` - -Note that the contents of the response are specific to the registry -implementation. Some registries may opt to provide a full catalog output, -limit it based on the user's access level or omit upstream results, if -providing mirroring functionality. Subsequently, the presence of a repository -in the catalog listing only means that the registry *may* provide access to -the repository at the time of the request. Conversely, a missing entry does -*not* mean that the registry does not have the repository. More succinctly, -the presence of a repository only guarantees that it is there but not that it -is _not_ there. - -For registries with a large number of repositories, this response may be quite -large. If such a response is expected, one should use pagination. A registry -may also limit the amount of responses returned even if pagination was not -explicitly requested. In this case the `Link` header will be returned along -with the results, and subsequent results can be obtained by following the link -as if pagination had been initially requested. - -For details of the `Link` header, please see the _Pagination_ section. - -#### Pagination - -Paginated catalog results can be retrieved by adding an `n` parameter to the -request URL, declaring that the response should be limited to `n` results. -Starting a paginated flow begins as follows: - -``` -GET /v2/_catalog?n= -``` - -The above specifies that a catalog response should be returned, from the start of -the result set, ordered lexically, limiting the number of results to `n`. The -response to such a request would look as follows: - -``` -200 OK -Content-Type: application/json -Link: <?n=&last=>; rel="next" - -{ - "repositories": [ - , - ... - ] -} -``` - -The above includes the _first_ `n` entries from the result set. To get the -_next_ `n` entries, one can create a URL where the argument `last` has the -value from `repositories[len(repositories)-1]`. If there are indeed more -results, the URL for the next block is encoded in an -[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" -relation. The presence of the `Link` header communicates to the client that -the entire result set has not been returned and another request must be -issued. If the header is not present, the client can assume that all results -have been recieved. - -> __NOTE:__ In the request template above, note that the brackets -> are required. For example, if the url is -> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would -> be `; rel="next"`. Please see -> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. 
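
As a non-normative illustration, a client might walk the paginated catalog as sketched below. The `Link` parsing is deliberately simplistic and assumes a single link value; a real client should use a proper RFC 5988 parser:

```
package client

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// ListRepositories walks the paginated catalog, following the rel="next"
// Link header until it is no longer present.
func ListRepositories(registry string, pageSize int) ([]string, error) {
	var all []string
	next := fmt.Sprintf("%s/v2/_catalog?n=%d", registry, pageSize)

	for next != "" {
		resp, err := http.Get(next)
		if err != nil {
			return nil, err
		}

		var page struct {
			Repositories []string `json:"repositories"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
			resp.Body.Close()
			return nil, err
		}
		resp.Body.Close()
		all = append(all, page.Repositories...)

		// A Link: <...>; rel="next" header means more results remain.
		next = ""
		if link := resp.Header.Get("Link"); strings.Contains(link, `rel="next"`) {
			target := strings.TrimSpace(strings.Split(link, ";")[0])
			target = strings.TrimSuffix(strings.TrimPrefix(target, "<"), ">")
			next = registry + target // the Link value is a path under the registry root
		}
	}
	return all, nil
}
```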
- -Compliant client implementations should always use the `Link` header -value when proceeding through results linearly. The client may construct URLs -to skip forward in the catalog. - -To get the next result set, a client would issue the request as follows, using -the URL encoded in the described `Link` header: - -``` -GET /v2/_catalog?n=&last= -``` - -The above process should then be repeated until the `Link` header is no longer -set. - -The catalog result set is represented abstractly as a lexically sorted list, -where the position in that list can be specified by the query term `last`. The -entries in the response start _after_ the term specified by `last`, up to `n` -entries. - -The behavior of `last` is quite simple when demonstrated with an example. Let -us say the registry has the following repositories: - -``` -a -b -c -d -``` - -If the value of `n` is 2, _a_ and _b_ will be returned on the first response. -The `Link` header returned on the response will have `n` set to 2 and last set -to _b_: - -``` -Link: <?n=2&last=b>; rel="next" -``` - -The client can then issue the request with above value from the `Link` header, -receiving the values _c_ and _d_. Note that n may change on second to last -response or be omitted fully, if the server may so choose. - -### Listing Image Tags - -It may be necessary to list all of the tags under a given repository. The tags -for an image repository can be retrieved with the following request: - - GET /v2//tags/list - -The response will be in the following format: - - 200 OK - Content-Type: application/json - - { - "name": , - "tags": [ - , - ... - ] - } - -For repositories with a large number of tags, this response may be quite -large. If such a response is expected, one should use the pagination. - -#### Pagination - -Paginated tag results can be retrieved by adding the appropriate parameters to -the request URL described above. The behavior of tag pagination is identical -to that specified for catalog pagination. We cover a simple flow to highlight -any differences. - -Starting a paginated flow may begin as follows: - -``` -GET /v2//tags/list?n= -``` - -The above specifies that a tags response should be returned, from the start of -the result set, ordered lexically, limiting the number of results to `n`. The -response to such a request would look as follows: - -``` -200 OK -Content-Type: application/json -Link: <?n=&last=>; rel="next" - -{ - "name": , - "tags": [ - , - ... - ] -} -``` - -To get the next result set, a client would issue the request as follows, using -the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` -header: - -``` -GET /v2//tags/list?n=&last= -``` - -The above process should then be repeated until the `Link` header is no longer -set in the response. The behavior of the `last` parameter, the provided -response result, lexical ordering and encoding of the `Link` header are -identical to that of catalog pagination. - -### Deleting an Image - -An image may be deleted from the registry via its `name` and `reference`. A -delete may be issued with the following request format: - - DELETE /v2//manifests/ - -For deletes, `reference` *must* be a digest or the delete will fail. If the -image exists and has been successfully deleted, the following response will be -issued: - - 202 Accepted - Content-Length: None - -If the image had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -## Detail - -> **Note**: This section is still under construction. 
For the purposes of -> implementation, if any details below differ from the described request flows -> above, the section below should be corrected. When they match, this note -> should be removed. - -The behavior of the endpoints are covered in detail in this section, organized -by route and entity. All aspects of the request and responses are covered, -including headers, parameters and body formats. Examples of requests and their -corresponding responses, with success and failure, are enumerated. - -> **Note**: The sections on endpoint detail are arranged with an example -> request, a description of the request, followed by information about that -> request. - -A list of methods and URIs are covered in the table below: - -|Method|Path|Entity|Description| -|------|----|------|-----------| -{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} | -{{end}}{{end}} - -The detail for each endpoint is covered in the following sections. - -### Errors - -The error codes encountered via the API are enumerated in the following table: - -|Code|Message|Description| -|----|-------|-----------| -{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} -{{end}} - -{{range $route := .RouteDescriptors}} -### {{.Entity}} - -{{.Description}} - -{{range $method := $route.Methods}} - -#### {{.Method}} {{$route.Entity}} - -{{.Description}} - -{{if .Requests}}{{range .Requests}}{{if .Name}} -##### {{.Name}}{{end}} - -``` -{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} - -{{if or .Headers .PathParameters .QueryParameters}} -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -{{range .Headers}}|`{{.Name}}`|header|{{.Description}}| -{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| -{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| -{{end}}{{end}} - -{{if .Successes}} -{{range .Successes}} -###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Fields}}The following fields may be returned in the response body: - -|Name|Description| -|----|-----------| -{{range .Fields}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{if .Headers}} -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{end}}{{end}} - -{{if .Failures}} -{{range .Failures}} -###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Headers}} -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| 
-{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}} - -{{if .ErrorCodes}} -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -{{range $err := .ErrorCodes}}| `{{$err}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | -{{end}} - -{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} - -{{end}} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md deleted file mode 100644 index b0d312563720..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/index.md +++ /dev/null @@ -1,14 +0,0 @@ - - -# Docker Registry v2 authentication - -See the [Token Authentication Specification](token.md) and -[Token Authentication Implementation](jwt.md) for more information. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md deleted file mode 100644 index f627b17a2d89..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/jwt.md +++ /dev/null @@ -1,324 +0,0 @@ - - -# Docker Registry v2 Bearer token specification - -This specification covers the `docker/distribution` implementation of the -v2 Registry's authentication schema. Specifically, it describes the JSON -Web Token schema that `docker/distribution` has adopted to implement the -client-opaque Bearer token issued by an authentication service and -understood by the registry. - -This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) - -## Getting a Bearer Token - -For this example, the client makes an HTTP GET request to the following URL: - -``` -https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push -``` - -The token server should first attempt to authenticate the client using any -authentication credentials provided with the request. As of Docker 1.8, the -registry client in the Docker Engine only supports Basic Authentication to -these token servers. If an attempt to authenticate to the token server fails, -the token server should return a `401 Unauthorized` response indicating that -the provided credentials are invalid. - -Whether the token server requires authentication is up to the policy of that -access control provider. Some requests may require authentication to determine -access (such as pushing or pulling a private repository) while others may not -(such as pulling from a public repository). - -After authenticating the client (which may simply be an anonymous client if -no attempt was made to authenticate), the token server must next query its -access control list to determine whether the client has the requested scope. In -this example request, if I have authenticated as user `jlhawn`, the token -server will determine what access I have to the repository `samalba/my-app` -hosted by the entity `registry.docker.io`. - -Once the token server has determined what access the client has to the -resources requested in the `scope` parameter, it will take the intersection of -the set of requested actions on each resource and the set of actions that the -client has in fact been granted. 
If the client only has a subset of the -requested access **it must not be considered an error** as it is not the -responsibility of the token server to indicate authorization errors as part of -this workflow. - -Continuing with the example request, the token server will find that the -client's set of granted access to the repository is `[pull, push]` which when -intersected with the requested access `[pull, push]` yields an equal set. If -the granted access set was found only to be `[pull]` then the intersected set -would only be `[pull]`. If the client has no access to the repository then the -intersected set would be empty, `[]`. - -It is this intersected set of access which is placed in the returned token. - -The server will now construct a JSON Web Token to sign and return. A JSON Web -Token has 3 main parts: - -1. Headers - - The header of a JSON Web Token is a standard JOSE header. The "typ" field - will be "JWT" and it will also contain the "alg" which identifies the - signing algorithm used to produce the signature. It will also usually have - a "kid" field, the ID of the key which was used to sign the token. - - Here is an example JOSE Header for a JSON Web Token (formatted with - whitespace for readability): - - ``` - { - "typ": "JWT", - "alg": "ES256", - "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6" - } - ``` - - It specifies that this object is going to be a JSON Web token signed using - the key with the given ID using the Elliptic Curve signature algorithm - using a SHA256 hash. - -2. Claim Set - - The Claim Set is a JSON struct containing these standard registered claim - name fields: - -
-
- `iss` (Issuer): The issuer of the token, typically the fqdn of the authorization server.
- `sub` (Subject): The subject of the token; the name or id of the client which requested it. This should be empty (`""`) if the client did not authenticate.
- `aud` (Audience): The intended audience of the token; the name or id of the service which will verify the token to authorize the client/subject.
- `exp` (Expiration): The token should only be considered valid up to this specified date and time.
- `nbf` (Not Before): The token should not be considered valid before this specified date and time.
- `iat` (Issued At): Specifies the date and time at which the authorization server generated this token.
- `jti` (JWT ID): A unique identifier for this token. Can be used by the intended audience to prevent replays of the token.
-
- The Claim Set will also contain a private claim name unique to this authorization server specification:
-
- `access`: An array of access entry objects, each with a `type` (the type of resource hosted by the service), a `name` (the name of the resource of the given type hosted by the service), and `actions` (an array of strings which give the actions authorized on this resource).
-
-
-
- - Here is an example of such a JWT Claim Set (formatted with whitespace for - readability): - - ``` - { - "iss": "auth.docker.com", - "sub": "jlhawn", - "aud": "registry.docker.com", - "exp": 1415387315, - "nbf": 1415387015, - "iat": 1415387015, - "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws", - "access": [ - { - "type": "repository", - "name": "samalba/my-app", - "actions": [ - "pull", - "push" - ] - } - ] - } - ``` - -3. Signature - - The authorization server will produce a JOSE header and Claim Set with no - extraneous whitespace, i.e., the JOSE Header from above would be - - ``` - {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"} - ``` - - and the Claim Set from above would be - - ``` - {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]} - ``` - - The utf-8 representation of this JOSE header and Claim Set are then - url-safe base64 encoded (sans trailing '=' buffer), producing: - - ``` - eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0 - ``` - - for the JOSE Header and - - ``` - eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 - ``` - - for the Claim Set. These two are concatenated using a '.' character, - yielding the string: - - ``` - eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 - ``` - - This is then used as the payload to a the `ES256` signature algorithm - specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA) - draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4) - - This example signature will use the following ECDSA key for the server: - - ``` - { - "kty": "EC", - "crv": "P-256", - "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6", - "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA", - "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q", - "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc" - } - ``` - - A resulting signature of the above payload using this key is: - - ``` - QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w - ``` - - Concatenating all of these together with a `.` character gives the - resulting JWT: - - ``` - 
eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w - ``` - -This can now be placed in an HTTP response and returned to the client to use to -authenticate to the audience service: - - -``` -HTTP/1.1 200 OK -Content-Type: application/json - -{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} -``` - -## Using the signed token - -Once the client has a token, it will try the registry request again with the -token placed in the HTTP `Authorization` header like so: - -``` -Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw -``` - -This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) - -## Verifying the token - -The registry must now verify the token presented by the user by inspecting the -claim set within. The registry will: - -- Ensure that the issuer (`iss` claim) is an authority it trusts. -- Ensure that the registry identifies as the audience (`aud` claim). -- Check that the current time is between the `nbf` and `exp` claim times. -- If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has - not been seen before. - - To enforce this, the registry may keep a record of `jti`s it has seen for - up to the `exp` time of the token to prevent token replays. -- Check the `access` claim value and use the identified resources and the list - of actions authorized to determine whether the token grants the required - level of access for the operation the client is attempting to perform. -- Verify that the signature of the token is valid. - -If any of these requirements are not met, the registry will return a -`403 Forbidden` response to indicate that the token is invalid. - -**Note**: it is only at this point in the workflow that an authorization error -may occur. The token server should *not* return errors when the user does not -have the requested authorization. 
Instead, the returned token should indicate -whatever of the requested scope the client does have (the intersection of -requested and granted access). If the token does not supply proper -authorization then the registry will return the appropriate error. - -At no point in this process should the registry need to call back to the -authorization server. The registry only needs to be supplied with the trusted -public keys to verify the token signatures. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md deleted file mode 100644 index 61e893c04e1a..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/auth/token.md +++ /dev/null @@ -1,220 +0,0 @@ - - -# Docker Registry v2 authentication via central service - -This document outlines the v2 Docker registry authentication scheme: - -![v2 registry auth](https://docs.google.com/drawings/d/1EHZU9uBLmcH0kytDClBv6jv6WR4xZjE8RKEUw1mARJA/pub?w=480&h=360) - -1. Attempt to begin a push/pull operation with the registry. -2. If the registry requires authorization it will return a `401 Unauthorized` - HTTP response with information on how to authenticate. -3. The registry client makes a request to the authorization service for a - Bearer token. -4. The authorization service returns an opaque Bearer token representing the - client's authorized access. -5. The client retries the original request with the Bearer token embedded in - the request's Authorization header. -6. The Registry authorizes the client by validating the Bearer token and the - claim set embedded within it and begins the push/pull session as usual. - -## Requirements - -- Registry clients which can understand and respond to token auth challenges - returned by the resource server. -- An authorization server capable of managing access controls to their - resources hosted by any given service (such as repositories in a Docker - Registry). -- A Docker Registry capable of trusting the authorization server to sign tokens - which clients can use for authorization and the ability to verify these - tokens for single use or for use during a sufficiently short period of time. - -## Authorization Server Endpoint Descriptions - -The described server is meant to serve as a standalone access control manager -for resources hosted by other services which wish to authenticate and manage -authorizations using a separate access control manager. - -A service like this is used by the official Docker Registry to authenticate -clients and verify their authorization to Docker image repositories. - -As of Docker 1.6, the registry client within the Docker Engine has been updated -to handle such an authorization workflow. - -## How to authenticate - -Registry V1 clients first contact the index to initiate a push or pull. Under -the Registry V2 workflow, clients should contact the registry first. If the -registry server requires authentication it will return a `401 Unauthorized` -response with a `WWW-Authenticate` header detailing how to authenticate to this -registry. - -For example, say I (username `jlhawn`) am attempting to push an image to the -repository `samalba/my-app`. For the registry to authorize this, I will need -`push` access to the `samalba/my-app` repository. 
The registry will first -return this response: - -``` -HTTP/1.1 401 Unauthorized -Content-Type: application/json; charset=utf-8 -Docker-Distribution-Api-Version: registry/2.0 -Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" -Date: Thu, 10 Sep 2015 19:32:31 GMT -Content-Length: 235 -Strict-Transport-Security: max-age=31536000 - -{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]} -``` - -Note the HTTP Response Header indicating the auth challenge: - -``` -Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" -``` - -This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3) - -This challenge indicates that the registry requires a token issued by the -specified token server and that the request the client is attempting will -need to include sufficient access entries in its claim set. To respond to this -challenge, the client will need to make a `GET` request to the URL -`https://auth.docker.io/token` using the `service` and `scope` values from the -`WWW-Authenticate` header. - -## Requesting a Token - -#### Query Parameters - -
-
- `service`: The name of the service which hosts the resource.
- `scope`: The resource in question, formatted as one of the space-delimited entries from the `scope` parameters from the `WWW-Authenticate` header shown above. This query parameter should be specified multiple times if there is more than one `scope` entry from the `WWW-Authenticate` header. The above example would be specified as: `scope=repository:samalba/my-app:push`.
-
-#### Token Response Fields
-
- `token`: An opaque Bearer token that clients should supply to subsequent requests in the `Authorization` header.
- `access_token`: For compatibility with OAuth 2.0, we will also accept `token` under the name `access_token`. At least one of these fields must be specified, but both may also appear (for compatibility with older clients). When both are specified, they should be equivalent; if they differ the client's choice is undefined.
- `expires_in`: (Optional) The duration in seconds since the token was issued that it will remain valid. When omitted, this defaults to 60 seconds. For compatibility with older clients, a token should never be returned with less than 60 seconds to live.
- `issued_at`: (Optional) The RFC3339-serialized UTC standard time at which a given token was issued. If `issued_at` is omitted, the expiration is from when the token exchange completed.
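Purely as an illustration, the response fields above map onto a small Go struct; the field names follow the specification, while the sample body and the `token`/`access_token` fallback shown here are assumptions rather than mandated behavior.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// tokenResponse mirrors the token response fields described above.
type tokenResponse struct {
	Token       string    `json:"token"`
	AccessToken string    `json:"access_token"`
	ExpiresIn   int       `json:"expires_in"`
	IssuedAt    time.Time `json:"issued_at"`
}

func main() {
	// Hypothetical response body, not taken from a real token server.
	body := []byte(`{"token": "opaque.bearer.token", "expires_in": 3600, "issued_at": "2009-11-10T23:00:00Z"}`)

	var tr tokenResponse
	if err := json.Unmarshal(body, &tr); err != nil {
		panic(err)
	}

	// Prefer token, falling back to access_token for servers that only
	// return the OAuth 2.0-style field name.
	bearer := tr.Token
	if bearer == "" {
		bearer = tr.AccessToken
	}
	fmt.Println(bearer, tr.ExpiresIn, tr.IssuedAt)
}
```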
-
- -#### Example - -For this example, the client makes an HTTP GET request to the following URL: - -``` -https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push -``` - -The token server should first attempt to authenticate the client using any -authentication credentials provided with the request. As of Docker 1.8, the -registry client in the Docker Engine only supports Basic Authentication to -these token servers. If an attempt to authenticate to the token server fails, -the token server should return a `401 Unauthorized` response indicating that -the provided credentials are invalid. - -Whether the token server requires authentication is up to the policy of that -access control provider. Some requests may require authentication to determine -access (such as pushing or pulling a private repository) while others may not -(such as pulling from a public repository). - -After authenticating the client (which may simply be an anonymous client if -no attempt was made to authenticate), the token server must next query its -access control list to determine whether the client has the requested scope. In -this example request, if I have authenticated as user `jlhawn`, the token -server will determine what access I have to the repository `samalba/my-app` -hosted by the entity `registry.docker.io`. - -Once the token server has determined what access the client has to the -resources requested in the `scope` parameter, it will take the intersection of -the set of requested actions on each resource and the set of actions that the -client has in fact been granted. If the client only has a subset of the -requested access **it must not be considered an error** as it is not the -responsibility of the token server to indicate authorization errors as part of -this workflow. - -Continuing with the example request, the token server will find that the -client's set of granted access to the repository is `[pull, push]` which when -intersected with the requested access `[pull, push]` yields an equal set. If -the granted access set was found only to be `[pull]` then the intersected set -would only be `[pull]`. If the client has no access to the repository then the -intersected set would be empty, `[]`. - -It is this intersected set of access which is placed in the returned token. 
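As a sketch of the intersection step described above (not an excerpt from any implementation), the following Go snippet intersects requested and granted actions, with a hard-coded grant table standing in for whatever access control list a real token server would consult.

```go
package main

import "fmt"

// grantedActions would be looked up from the access control list for the
// authenticated subject; it is hard-coded here purely for illustration.
var grantedActions = map[string][]string{
	"repository:samalba/my-app": {"pull", "push"},
}

// intersect returns the actions present in both the requested and granted
// sets, preserving the order of the requested slice.
func intersect(requested, granted []string) []string {
	allowed := make(map[string]bool, len(granted))
	for _, a := range granted {
		allowed[a] = true
	}
	var out []string
	for _, a := range requested {
		if allowed[a] {
			out = append(out, a)
		}
	}
	return out
}

func main() {
	requested := []string{"pull", "push"}
	granted := grantedActions["repository:samalba/my-app"]
	// With granted == [pull push] this prints [pull push]; if the subject
	// were only granted pull it would print [pull], and with no access at
	// all the result would simply be empty rather than an error.
	fmt.Println(intersect(requested, granted))
}
```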
- -The server then constructs an implementation-specific token with this -intersected set of access, and returns it to the Docker client to use to -authenticate to the audience service (within the indicated window of time): - -``` -HTTP/1.1 200 OK -Content-Type: application/json - -{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": "3600","issued_at": "2009-11-10T23:00:00Z"} -``` - - -## Using the Bearer token - -Once the client has a token, it will try the registry request again with the -token placed in the HTTP `Authorization` header like so: - -``` -Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw -``` - -This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md deleted file mode 100644 index ec937b647460..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/implementations.md +++ /dev/null @@ -1,32 +0,0 @@ - - -# Distribution API Implementations - -This is a list of known implementations of the Distribution API spec. - -## [Docker Distribution Registry](https://github.com/docker/distribution) - -Docker distribution is the reference implementation of the distribution API -specification. It aims to fully implement the entire specification. - -### Releases -#### 2.0.1 (_in development_) -Implements API 2.0.1 - -_Known Issues_ - - No resumable push support - - Content ranges ignored - - Blob upload status will always return a starting range of 0 - -#### 2.0.0 -Implements API 2.0.0 - -_Known Issues_ - - No resumable push support - - No PATCH implementation for blob upload - - Content ranges ignored - diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md deleted file mode 100644 index a8916dccce5d..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/json.md +++ /dev/null @@ -1,94 +0,0 @@ - - - - -# Docker Distribution JSON Canonicalization - -To provide consistent content hashing of JSON objects throughout Docker -Distribution APIs, the specification defines a canonical JSON format. Adopting -such a canonicalization also aids in caching JSON responses. 
- -Note that protocols should not be designed to depend on identical JSON being -generated across different versions or clients. The canonicalization rules are -merely useful for caching and consistency. - -## Rules - -Compliant JSON should conform to the following rules: - -1. All generated JSON should comply with [RFC - 7159](http://www.ietf.org/rfc/rfc7159.txt). -2. Resulting "JSON text" shall always be encoded in UTF-8. -3. Unless a canonical key order is defined for a particular schema, object - keys shall always appear in lexically sorted order. -4. All whitespace between tokens should be removed. -5. No "trailing commas" are allowed in object or array definitions. -6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e". - Ampersand "&" is escaped to "\u0026". - -## Examples - -The following is a simple example of a canonicalized JSON string: - -```json -{"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]} -``` - -## Reference - -### Other Canonicalizations - -The OLPC project specifies [Canonical -JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in -[TUF](http://theupdateframework.com/), which may be used with other -distribution-related protocols, this alternative format has been proposed in -case the original source changes. Specifications complying with either this -specification or an alternative should explicitly call out the -canonicalization format. Except for key ordering, this specification is mostly -compatible. - -### Go - -In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library -will emit canonical JSON by default. Simply using `json.Marshal` will suffice -in most cases: - -```go -incoming := map[string]interface{}{ - "asdf": 1, - "qwer": []interface{}{}, - "zxcv": []interface{}{ - map[string]interface{}{}, - true, - int(1e9), - "tyui", - }, -} - -canonical, err := json.Marshal(incoming) -if err != nil { - // ... handle error -} -``` - -To apply canonical JSON format spacing to an existing serialized JSON buffer, one -can use -[`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65) -with the following arguments: - -```go -incoming := getBytes() -var canonical bytes.Buffer -if err := json.Indent(&canonical, incoming, "", ""); err != nil { - // ... handle error -} -``` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md deleted file mode 100644 index bb60ddd9f812..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/spec/manifest-v2-1.md +++ /dev/null @@ -1,163 +0,0 @@ - - -# Image Manifest Version 2, Schema 1 - -This document outlines the format of of the V2 image manifest. The image -manifest described herein was introduced in the Docker daemon in the [v1.3.0 -release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453). -It is a provisional manifest to provide a compatibility with the [V1 Image -format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the -requirements are defined for the [V2 Schema 2 -image](https://github.com/docker/distribution/pull/62). - - -Image manifests describe the various constituents of a docker image. 
Image -manifests can be serialized to JSON format with the following media types: - -Manifest Type | Media Type -------------- | ------------- -manifest | "application/vnd.docker.distribution.manifest.v1+json" -signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws" - -*Note that "application/json" will also be accepted for schema 1.* - -References: - - - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015) - - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093) - -## *Manifest* Field Descriptions - -Manifest provides the base accessible fields for working with V2 image format - in the registry. - -- **`name`** *string* - - name is the name of the image's repository - -- **`tag`** *string* - - tag is the tag of the image - -- **`architecture`** *string* - - architecture is the host architecture on which this image is intended to - run. This is for information purposes and not currently used by the engine - -- **`fsLayers`** *array* - - fsLayers is a list of filesystem layer blob sums contained in this image. - - An fsLayer is a struct consisting of the following fields - - **`blobSum`** *digest.Digest* - - blobSum is the digest of the referenced filesystem image layer. A - digest can be a tarsum or sha256 hash. - - -- **`history`** *array* - - history is a list of unstructured historical data for v1 compatibility. It - contains ID of the image layer and ID of the layer's parent layers. - - history is a struct consisting of the following fields - - **`v1Compatibility`** string - - V1Compatibility is the raw V1 compatibility information. This will - contain the JSON object describing the V1 of this image. - -- **`schemaVersion`** *int* - - SchemaVersion is the image manifest schema that this image follows. - ->**Note**:the length of `history` must be equal to the length of `fsLayers` and ->entries in each are correlated by index. - -## Signed Manifests - -Signed manifests provides an envelope for a signed image manifest. A signed -manifest consists of an image manifest along with an additional field -containing the signature of the manifest. - -The docker client can verify signed manifests and displays a message to the user. - -### Signing Manifests - -Image manifests can be signed in two different ways: with a *libtrust* private - key or an x509 certificate chain. When signing with an x509 certificate chain, - the public key of the first element in the chain must be the public key - corresponding with the sign key. - -### Signed Manifest Field Description - -Signed manifests include an image manifest and a list of signatures generated -by *libtrust*. 
A signature consists of the following fields: - - -- **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)* - - A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html) - -- **`signature`** *string* - - A signature for the image manifest, signed by a *libtrust* private key - -- **`protected`** *string* - - The signed protected header - -## Example Manifest - -*Example showing the official 'hello-world' image manifest.* - -``` -{ - "name": "hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - }, - { - "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11" - }, - { - "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - ], - "schemaVersion": 1, - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", - "kty": "EC", - "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", - "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" - }, - "alg": "ES256" - }, - "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", - "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" - } - ] -} - -``` diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md deleted file mode 100644 index f994f38af365..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/azure.md +++ /dev/null @@ -1,24 +0,0 @@ - - - -# Microsoft Azure storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage][azure-blob-storage] for object storage. - -## Parameters - -The following parameters must be used to authenticate and configure the storage driver (case-sensitive): - -* `accountname`: Name of the Azure Storage Account. -* `accountkey`: Primary or Secondary Key for the Storage Account. -* `container`: Name of the root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api]. -* `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. - -[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/ -[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md deleted file mode 100644 index 2dbad8cdcd16..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/filesystem.md +++ /dev/null @@ -1,16 +0,0 @@ - - - -# Filesystem storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem. - -## Parameters - -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. 
Defaults to `/var/lib/registry`. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md deleted file mode 100644 index bb6f2b01a022..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/gcs.md +++ /dev/null @@ -1,22 +0,0 @@ - - - -# Google Cloud Storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses Google Cloud for object storage. - -## Parameters - -`bucket`: The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization). - -`keyfile`: (optional) A private key file in JSON format, used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts). - -**Note** Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). - -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md deleted file mode 100644 index f43e1510fd69..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/inmemory.md +++ /dev/null @@ -1,18 +0,0 @@ - - - -# In-memory storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage. - -**IMPORTANT**: This storage driver *does not* persist data across runs, and primarily exists for testing. - -## Parameters - -None diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md deleted file mode 100644 index f6f2de972bd8..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/oss.md +++ /dev/null @@ -1,33 +0,0 @@ - - -# Aliyun OSS storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage. - -## Parameters - -* `accesskeyid`: Your access key ID. - -* `accesskeysecret`: Your access key secret. - -* `region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at - -* `endpoint`: (optional) By default, the endpoint shoulb be `..aliyuncs.com` or `.-internal.aliyuncs.com` (when internal=true). You can change the default endpoint via changing this value. - -* `internal`: (optional) Using internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at - -* `bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). - -* `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). - -* `secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true if not specified. - -* `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. 
Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. - -* `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md deleted file mode 100644 index 4b630e19ae51..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/rados.md +++ /dev/null @@ -1,40 +0,0 @@ - - - -# Ceph RADOS storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses -[Ceph RADOS Object Storage][rados] for storage backend. - -## Parameters - -The following parameters must be used to configure the storage driver -(case-sensitive): - -* `poolname`: Name of the Ceph pool -* `username` *optional*: The user to connect as (i.e. admin, not client.admin) -* `chunksize` *optional*: Size of the written RADOS objects. Default value is -4MB (4194304). - -This drivers loads the [Ceph client configuration][rados-config] from the -following regular paths (the first found is used): - -* `$CEPH_CONF` (environment variable) -* `/etc/ceph/ceph.conf` -* `~/.ceph/config` -* `ceph.conf` (in the current working directory) - -## Developing - -To include this driver when building Docker Distribution, use the build tag -`include_rados`. Please see the [building documentation][building] for details. - -[rados]: http://ceph.com/docs/master/rados/ -[rados-config]: http://ceph.com/docs/master/rados/configuration/ceph-conf/ -[building]: https://github.com/docker/distribution/blob/master/docs/building.md#optional-build-tags diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md deleted file mode 100644 index 3ce5df233aa8..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/s3.md +++ /dev/null @@ -1,34 +0,0 @@ - - - -# S3 storage driver - -An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage. - -## Parameters - -`accesskey`: Your aws access key. - -`secretkey`: Your aws secret key. - -**Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials. - -`region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html - -`bucket`: The name of your s3 bucket where you wish to store objects (needs to already be created prior to driver initialization). - -`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). - -`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns. - -`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. 
This defaults to false if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false) - -`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to s3. The default is 10 MB. Keep in mind that the minimum part size for s3 is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to s3. - -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md deleted file mode 100644 index b2f937da295a..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storage-drivers/swift.md +++ /dev/null @@ -1,200 +0,0 @@ - - - -# OpenStack Swift storage driver - -An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage. - -## Parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- `authurl`: URL for obtaining an auth token.
- `username`: Your OpenStack user name.
- `password`: Your OpenStack password.
- `container`: The name of your Swift container where you wish to store objects. The driver creates the named container during its initialization.
- `tenant`: Optionally, your OpenStack tenant name. You can either use `tenant` or `tenantid`.
- `tenantid`: Optionally, your OpenStack tenant id. You can either use `tenant` or `tenantid`.
- `domain`: Optionally, your OpenStack domain name for Identity v3 API. You can either use `domain` or `domainid`.
- `domainid`: Optionally, your OpenStack domain id for Identity v3 API. You can either use `domain` or `domainid`.
- `trustid`: Optionally, your OpenStack trust id for Identity v3 API.
- `insecureskipverify`: Optionally, set `insecureskipverify` to `true` to skip TLS verification for your OpenStack provider. The driver uses `false` by default.
- `region`: Optionally, specify the OpenStack region name in which you would like to store objects (for example `fr`).
- `chunksize`: Optionally, specify the segment size for Dynamic Large Objects uploads (performed by WriteStream) to Swift. The default is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to Swift.
- `prefix`: Optionally, supply the root directory tree in which to store all registry files. Defaults to the empty string which is the container's root.
- `secretkey`: Optionally, the secret key used to generate temporary URLs.
- `accesskey`: Optionally, the access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter.

-

-
-
- The features supported by the Swift server are queried by requesting the `/info` URL on the server. In case the administrator disabled that feature, the configuration file can specify the following optional parameters:
-
- `tempurlcontainerkey`: Specify whether to use container secret key to generate temporary URL when set to true, or the account secret key otherwise.

-

-
- `tempurlmethods`:

- Array of HTTP methods that are supported by the TempURL middleware of the Swift server. Example:

- - - tempurlmethods: - - GET - - PUT - - HEAD - - POST - - DELETE - -

-
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md b/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md deleted file mode 100644 index 158ad9997c03..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/docs/storagedrivers.md +++ /dev/null @@ -1,65 +0,0 @@ - - - -# Docker Registry Storage Driver - -This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers. - -## Provided Drivers - -This storage driver package comes bundled with several drivers: - -- [inmemory](storage-drivers/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing. -- [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem. -- [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket. -- [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). -- [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool. -- [swift](storage-drivers/swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/). -- [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss). -- [gcs](storage-drivers/gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket. - -## Storage Driver API - -The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems. - -Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key. - -Storage drivers are intended to be written in Go, providing compile-time -validation of the `storagedriver.StorageDriver` interface. - -## Driver Selection and Configuration - -The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package. - -Storage driver factories may be registered by name using the -`factory.Register` method, and then later invoked by calling `factory.Create` -with a driver name and parameters map. If no such storage driver can be found, -`factory.Create` will return an `InvalidStorageDriverError`. - -## Driver Contribution - -### Writing new storage drivers - -To create a valid storage driver, one must implement the -`storagedriver.StorageDriver` interface and make sure to expose this driver -via the factory system. - -#### Registering - -Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase. 
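As a rough sketch of that registration pattern, the snippet below shows a hypothetical driver wiring itself into the factory from its `init` function; the driver name and constructor are invented for the example, and the factory interface shown should be checked against the vendored version of the package.

```go
// Package mydriver sketches how a hypothetical storage driver could
// register itself with the factory; it is illustrative, not a real driver.
package mydriver

import (
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/factory"
)

const driverName = "mydriver" // invented name for the example

// myDriverFactory implements factory.StorageDriverFactory.
type myDriverFactory struct{}

// Create builds a driver instance from the parameters map taken from the
// registry configuration (credentials, rootdirectory, and so on).
func (f *myDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	return newMyDriver(parameters)
}

func init() {
	// Registering in init means other packages never need to reference this
	// driver's types directly; they select the driver purely by name.
	factory.Register(driverName, &myDriverFactory{})
}

// newMyDriver would validate the parameters and return a type implementing
// the full storagedriver.StorageDriver interface; elided in this sketch.
func newMyDriver(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	_ = parameters // a real driver would read its settings here
	return nil, nil
}
```

The driver is then selectable by name through `factory.Create` with a parameters map, provided the package is imported somewhere (typically as a blank import) so that its `init` function runs.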
- -## Testing - -Storage driver test suites are provided in -`storagedriver/testsuites/testsuites.go` and may be used for any storage -driver written in Go. Tests can be registered using the `RegisterSuite` -function, which run the same set of tests for any registered drivers. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/errors.go index f4c2603c26b2..c20f28113ce5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/errors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/errors.go @@ -20,6 +20,15 @@ var ErrManifestNotModified = errors.New("manifest not modified") // performed var ErrUnsupported = errors.New("operation unsupported") +// ErrTagUnknown is returned if the given tag is not known by the tag service +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + // ErrRepositoryUnknown is returned if the named repository is not known by // the registry. type ErrRepositoryUnknown struct { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go deleted file mode 100644 index 73fcc4535f8b..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api.go +++ /dev/null @@ -1,37 +0,0 @@ -package api - -import ( - "errors" - "net/http" - - "github.com/docker/distribution/health" -) - -var ( - updater = health.NewStatusUpdater() -) - -// DownHandler registers a manual_http_status that always returns an Error -func DownHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - updater.Update(errors.New("Manual Check")) - } else { - w.WriteHeader(http.StatusNotFound) - } -} - -// UpHandler registers a manual_http_status that always returns nil -func UpHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - updater.Update(nil) - } else { - w.WriteHeader(http.StatusNotFound) - } -} - -// init sets up the two endpoints to bring the service up and down -func init() { - health.Register("manual_http_status", updater) - http.HandleFunc("/debug/health/down", DownHandler) - http.HandleFunc("/debug/health/up", UpHandler) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go b/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go deleted file mode 100644 index ec82154f6ef2..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/health/api/api_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package api - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution/health" -) - -// TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint -// /debug/health/down with METHOD GET returns a 404 -func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - DownHandler(recorder, req) - - if recorder.Code != 404 { - t.Errorf("Did not get a 404.") - } -} - -// TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint -// /debug/health/down with METHOD GET returns a 404 -func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil) - if err 
!= nil { - t.Errorf("Failed to create request.") - } - - DownHandler(recorder, req) - - if recorder.Code != 404 { - t.Errorf("Did not get a 404.") - } -} - -// TestPOSTDownHandlerChangeStatus ensures the endpoint /debug/health/down changes -// the status code of the response to 503 -// This test is order dependent, and should come before TestPOSTUpHandlerChangeStatus -func TestPOSTDownHandlerChangeStatus(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - DownHandler(recorder, req) - - if recorder.Code != 200 { - t.Errorf("Did not get a 200.") - } - - if len(health.CheckStatus()) != 1 { - t.Errorf("DownHandler didn't add an error check.") - } -} - -// TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes -// the status code of the response to 200 -func TestPOSTUpHandlerChangeStatus(t *testing.T) { - recorder := httptest.NewRecorder() - - req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil) - if err != nil { - t.Errorf("Failed to create request.") - } - - UpHandler(recorder, req) - - if recorder.Code != 200 { - t.Errorf("Did not get a 200.") - } - - if len(health.CheckStatus()) != 0 { - t.Errorf("UpHandler didn't remove the error check.") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 000000000000..a2082ec02f5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,155 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists. +const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). 
+ OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returnes the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: SchemaVersion, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. 
+func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go new file mode 100644 index 000000000000..09e6ed1f0afd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/manifestlist/manifestlist_test.go @@ -0,0 +1,111 @@ +package manifestlist + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution" +) + +var expectedManifestListSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", + "manifests": [ + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "platform": { + "architecture": "amd64", + "os": "linux", + "features": [ + "sse4" + ] + } + }, + { + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "size": 2392, + "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608", + "platform": { + "architecture": "sun4m", + "os": "sunos" + } + } + ] +}`) + +func TestManifestList(t *testing.T) { + manifestDescriptors := []ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + }, + Platform: PlatformSpec{ + Architecture: "amd64", + OS: "linux", + Features: []string{"sse4"}, + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", + Size: 2392, + MediaType: "application/vnd.docker.distribution.manifest.v2+json", + }, + Platform: PlatformSpec{ + Architecture: "sun4m", + OS: "sunos", + }, + }, + } + + deserialized, err := FromDescriptors(manifestDescriptors) + if err != nil { + t.Fatalf("error creating DeserializedManifestList: %v", err) + } + + mediaType, canonical, err := deserialized.Payload() + + if mediaType != MediaTypeManifestList { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest list: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that the canonical field has the expected value. 
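The `init` function above exists so that generic code can decode a manifest list from raw bytes plus a media type without depending on this package directly. A minimal, hedged sketch of that round trip (the `example` package and `decodeManifestList` helper are illustrative; it assumes the `distribution.UnmarshalManifest` helper that pairs with `RegisterManifestSchema`):

package example

import (
	"github.com/docker/distribution"
	// Imported for its side effect: the init above registers
	// MediaTypeManifestList in the manifest schema table.
	_ "github.com/docker/distribution/manifest/manifestlist"
)

// decodeManifestList turns raw bytes (for example, a registry response body
// whose Content-Type is the manifest list media type) back into a
// distribution.Manifest plus its descriptor.
func decodeManifestList(mediaType string, payload []byte) (distribution.Manifest, distribution.Descriptor, error) {
	// Dispatches on the registered media type, ending up in the
	// manifestListFunc registered by this package's init.
	return distribution.UnmarshalManifest(mediaType, payload)
}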
+ if !bytes.Equal(expectedManifestListSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestListSerialization)) + } + + var unmarshalled DeserializedManifestList + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + references := deserialized.References() + if len(references) != 2 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + for i := range references { + if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) { + t.Fatalf("unexpected value %d returned by References: %v", i, references[i]) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 000000000000..5cdd76796c84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,283 @@ +package schema1 + +import ( + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
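A hedged sketch of how the config manifest builder declared below is meant to be driven (the `example` package, `buildSchema1` name, and parameter names are illustrative; the flow mirrors the `TestConfigBuilder` test added further on):

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/reference"
	"github.com/docker/libtrust"
)

// buildSchema1 constructs the builder from an image configuration, appends
// one descriptor per layer, and Builds a signed schema1 manifest.
func buildSchema1(ctx context.Context, bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	builder := schema1.NewConfigManifestBuilder(bs, pk, ref, configJSON)
	for _, layer := range layers {
		// distribution.Descriptor satisfies Describable, so descriptors can
		// be appended directly.
		if err := builder.AppendReference(layer); err != nil {
			return nil, err
		}
	}
	return builder.Build(ctx)
}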
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, errors.New("number of descriptors and number of layers in rootfs must match") + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + Author: h.Author, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, 
errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + tag := "" + if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.ref.Name(), + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + // todo: verification here? + mb.descriptors = append(mb.descriptors, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. 
+ var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder_test.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder_test.go new file mode 100644 index 000000000000..5f9abaa9f84b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/config_builder_test.go @@ -0,0 +1,272 @@ +package schema1 + +import ( + "bytes" + "compress/gzip" + "io" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: mediaType, + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestEmptyTar(t *testing.T) { + // Confirm that gzippedEmptyTar expands to 1024 NULL bytes. + var decompressed [2048]byte + gzipReader, err := gzip.NewReader(bytes.NewReader(gzippedEmptyTar)) + if err != nil { + t.Fatalf("NewReader returned error: %v", err) + } + n, err := gzipReader.Read(decompressed[:]) + if n != 1024 { + t.Fatalf("read returned %d bytes; expected 1024", n) + } + n, err = gzipReader.Read(decompressed[1024:]) + if n != 0 { + t.Fatalf("read returned %d bytes; expected 0", n) + } + if err != io.EOF { + t.Fatal("read did not return io.EOF") + } + gzipReader.Close() + for _, b := range decompressed[:1024] { + if b != 0 { + t.Fatal("nonzero byte in decompressed tar") + } + } + + // Confirm that digestSHA256EmptyTar is the digest of gzippedEmptyTar. 
+ dgst := digest.FromBytes(gzippedEmptyTar) + if dgst != digestSHA256GzippedEmptyTar { + t.Fatalf("digest mismatch for empty tar: expected %s got %s", digestSHA256GzippedEmptyTar, dgst) + } +} + +func TestConfigBuilder(t *testing.T) { + imgJSON := `{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "author": "Alyssa P. 
Hacker \u003calyspdev@example.com\u003e", + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}` + + descriptors := []distribution.Descriptor{ + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("could not generate key for testing: %v", err) + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + + ref, err := reference.ParseNamed("testrepo:testtag") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + builder := NewConfigManifestBuilder(bs, pk, ref, []byte(imgJSON)) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + signed, err := builder.Build(context.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the gzipped empty layer tar was put in the blob store + _, err = bs.Stat(context.Background(), digestSHA256GzippedEmptyTar) + if err != nil { + t.Fatal("gzipped empty tar was not put in the blob store") + } + + manifest := signed.(*SignedManifest).Manifest + + if manifest.Versioned.SchemaVersion != 1 { + t.Fatal("SchemaVersion != 1") + } + if manifest.Name != "testrepo" { + t.Fatal("incorrect name in manifest") + } + if manifest.Tag != "testtag" { + t.Fatal("incorrect tag in manifest") + } + if manifest.Architecture != "amd64" { + t.Fatal("incorrect arch in manifest") + } + + expectedFSLayers := []FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if len(manifest.FSLayers) != len(expectedFSLayers) { + t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers)) + } + if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) { + t.Fatal("wrong FSLayers list") + } + + expectedV1Compatibility := []string{ + `{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo 
hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"69e5c1bfadad697fdb6db59f6326648fa119e0c031a0eda33b8cfadcab54ba7f","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`, + `{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]},"author":"Alyssa P. Hacker \u003calyspdev@example.com\u003e"}`, + `{"id":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","parent":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`, + `{"id":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`, + `{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`, + `{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`, + } + + if len(manifest.History) != len(expectedV1Compatibility) { + t.Fatalf("wrong number of history entries: %d", len(manifest.History)) + } + for i := range expectedV1Compatibility { + if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] { + t.Errorf("wrong V1Compatibility %d. 
expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go index e7cbf958ed19..bff47bde05f5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest.go @@ -2,20 +2,22 @@ package schema1 import ( "encoding/json" + "fmt" + "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" ) -// TODO(stevvooe): When we rev the manifest format, the contents of this -// package should be moved to manifest/v1. - const ( - // ManifestMediaType specifies the mediaType for the current version. Note - // that for schema version 1, the the media is optionally - // "application/json". - ManifestMediaType = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeManifest specifies the mediaType for the current version. Note + // that for schema version 1, the the media is optionally "application/json". + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version + MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // MediaTypeManifestLayer specifies the media type for manifest layers + MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" ) var ( @@ -26,6 +28,47 @@ var ( } ) +func init() { + schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + sm := new(SignedManifest) + err := sm.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Digest: digest.FromBytes(sm.Canonical), + Size: int64(len(sm.Canonical)), + MediaType: MediaTypeSignedManifest, + } + return sm, desc, err + } + err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("application/json", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// History stores unstructured v1 compatibility information +type History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + // Manifest provides the base accessible fields for working with V2 image // format in the registry. type Manifest struct { @@ -49,59 +92,64 @@ type Manifest struct { } // SignedManifest provides an envelope for a signed image manifest, including -// the format sensitive raw bytes. It contains fields to +// the format sensitive raw bytes. type SignedManifest struct { Manifest - // Raw is the byte representation of the ImageManifest, used for signature - // verification. 
The value of Raw must be used directly during - // serialization, or the signature check will fail. The manifest byte + // Canonical is the canonical byte representation of the ImageManifest, + // without any attached signatures. The manifest byte // representation cannot change or it will have to be re-signed. - Raw []byte `json:"-"` + Canonical []byte `json:"-"` + + // all contains the byte representation of the Manifest including signatures + // and is returned by Payload() + all []byte } -// UnmarshalJSON populates a new ImageManifest struct from JSON data. +// UnmarshalJSON populates a new SignedManifest struct from JSON data. func (sm *SignedManifest) UnmarshalJSON(b []byte) error { - sm.Raw = make([]byte, len(b), len(b)) - copy(sm.Raw, b) + sm.all = make([]byte, len(b), len(b)) + // store manifest and signatures in all + copy(sm.all, b) - p, err := sm.Payload() + jsig, err := libtrust.ParsePrettySignature(b, "signatures") if err != nil { return err } + // Resolve the payload in the manifest. + bytes, err := jsig.Payload() + if err != nil { + return err + } + + // sm.Canonical stores the canonical manifest JSON + sm.Canonical = make([]byte, len(bytes), len(bytes)) + copy(sm.Canonical, bytes) + + // Unmarshal canonical JSON into Manifest object var manifest Manifest - if err := json.Unmarshal(p, &manifest); err != nil { + if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { return err } sm.Manifest = manifest + return nil } -// Payload returns the raw, signed content of the signed manifest. The -// contents can be used to calculate the content identifier. -func (sm *SignedManifest) Payload() ([]byte, error) { - jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - return nil, err +// References returnes the descriptors of this manifests references +func (sm SignedManifest) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) + for i, fsLayer := range sm.FSLayers { + dependencies[i] = distribution.Descriptor{ + MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", + Digest: fsLayer.BlobSum, + } } - // Resolve the payload in the manifest. - return jsig.Payload() -} - -// Signatures returns the signatures as provided by -// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws -// signatures. -func (sm *SignedManifest) Signatures() ([][]byte, error) { - jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - return nil, err - } + return dependencies - // Resolve the payload in the manifest. - return jsig.Signatures() } // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner @@ -109,22 +157,28 @@ func (sm *SignedManifest) Signatures() ([][]byte, error) { // use Raw directly, since the the content produced by json.Marshal will be // compacted and will fail signature checks. func (sm *SignedManifest) MarshalJSON() ([]byte, error) { - if len(sm.Raw) > 0 { - return sm.Raw, nil + if len(sm.all) > 0 { + return sm.all, nil } // If the raw data is not available, just dump the inner content. return json.Marshal(&sm.Manifest) } -// FSLayer is a container struct for BlobSums defined in an image manifest -type FSLayer struct { - // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum digest.Digest `json:"blobSum"` +// Payload returns the signed content of the signed manifest. 
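A hedged sketch of how the new `Canonical`/`all` split is intended to be consumed (the `example` package and `manifestDigest` helper are illustrative): the content identifier is computed over `Canonical`, the signature-stripped bytes, while `Payload` returns the full pretty-signed JWS for storage or transport, matching the descriptor built in this package's `init`.

package example

import (
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
)

// manifestDigest returns the manifest's content identifier (digest of the
// canonical, unsigned bytes) together with the signed payload bytes.
func manifestDigest(sm *schema1.SignedManifest) (digest.Digest, []byte, error) {
	_, signedBytes, err := sm.Payload()
	if err != nil {
		return "", nil, err
	}
	return digest.FromBytes(sm.Canonical), signedBytes, nil
}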
+func (sm SignedManifest) Payload() (string, []byte, error) { + return MediaTypeSignedManifest, sm.all, nil } -// History stores unstructured v1 compatibility information -type History struct { - // V1Compatibility is the raw v1 compatibility information - V1Compatibility string `json:"v1Compatibility"` +// Signatures returns the signatures as provided by +// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws +// signatures. +func (sm *SignedManifest) Signatures() ([][]byte, error) { + jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + // Resolve the payload in the manifest. + return jsig.Signatures() } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest_test.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest_test.go index 7d0d382db10f..05bb8ec57a91 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/manifest_test.go @@ -19,15 +19,15 @@ type testEnv struct { func TestManifestMarshaling(t *testing.T) { env := genEnv(t) - // Check that the Raw field is the same as json.MarshalIndent with these + // Check that the all field is the same as json.MarshalIndent with these // parameters. p, err := json.MarshalIndent(env.signed, "", " ") if err != nil { t.Fatalf("error marshaling manifest: %v", err) } - if !bytes.Equal(p, env.signed.Raw) { - t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.Raw), string(p)) + if !bytes.Equal(p, env.signed.all) { + t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.all), string(p)) } } @@ -35,7 +35,7 @@ func TestManifestUnmarshaling(t *testing.T) { env := genEnv(t) var signed SignedManifest - if err := json.Unmarshal(env.signed.Raw, &signed); err != nil { + if err := json.Unmarshal(env.signed.all, &signed); err != nil { t.Fatalf("error unmarshaling signed manifest: %v", err) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder.go new file mode 100644 index 000000000000..fc1045f9ea43 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -0,0 +1,98 @@ +package schema1 + +import ( + "fmt" + + "errors" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// referenceManifestBuilder is a type for constructing manifests from schema1 +// dependencies. +type referenceManifestBuilder struct { + Manifest + pk libtrust.PrivateKey +} + +// NewReferenceManifestBuilder is used to build new manifests for the current +// schema version using schema1 dependencies. 
+func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { + tag := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: ref.Name(), + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("Unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. 
+ History History +} + +// Descriptor describes a reference +func (r Reference) Descriptor() distribution.Descriptor { + return distribution.Descriptor{ + MediaType: MediaTypeManifestLayer, + Digest: r.Digest, + Size: r.Size, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder_test.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder_test.go new file mode 100644 index 000000000000..35db28e466af --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/reference_builder_test.go @@ -0,0 +1,108 @@ +package schema1 + +import ( + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +func makeSignedManifest(t *testing.T, pk libtrust.PrivateKey, refs []Reference) *SignedManifest { + u := &Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: "foo/bar", + Tag: "latest", + Architecture: "amd64", + } + + for i := len(refs) - 1; i >= 0; i-- { + u.FSLayers = append(u.FSLayers, FSLayer{ + BlobSum: refs[i].Digest, + }) + u.History = append(u.History, History{ + V1Compatibility: refs[i].History.V1Compatibility, + }) + } + + signedManifest, err := Sign(u, pk) + if err != nil { + t.Fatalf("unexpected error signing manifest: %v", err) + } + return signedManifest +} + +func TestReferenceBuilder(t *testing.T) { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + r1 := Reference{ + Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + Size: 1, + History: History{V1Compatibility: "{\"a\" : 1 }"}, + } + r2 := Reference{ + Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + Size: 2, + History: History{V1Compatibility: "{\"\a\" : 2 }"}, + } + + handCrafted := makeSignedManifest(t, pk, []Reference{r1, r2}) + + ref, err := reference.ParseNamed(handCrafted.Manifest.Name) + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + ref, err = reference.WithTag(ref, handCrafted.Manifest.Tag) + if err != nil { + t.Fatalf("could not add tag: %v", err) + } + + b := NewReferenceManifestBuilder(pk, ref, handCrafted.Manifest.Architecture) + _, err = b.Build(context.Background()) + if err == nil { + t.Fatal("Expected error building zero length manifest") + } + + err = b.AppendReference(r1) + if err != nil { + t.Fatal(err) + } + + err = b.AppendReference(r2) + if err != nil { + t.Fatal(err) + } + + refs := b.References() + if len(refs) != 2 { + t.Fatalf("Unexpected reference count : %d != %d", 2, len(refs)) + } + + // Ensure ordering + if refs[0].Digest != r2.Digest { + t.Fatalf("Unexpected reference : %v", refs[0]) + } + + m, err := b.Build(context.Background()) + if err != nil { + t.Fatal(err) + } + + built, ok := m.(*SignedManifest) + if !ok { + t.Fatalf("unexpected type from Build() : %T", built) + } + + d1 := digest.FromBytes(built.Canonical) + d2 := digest.FromBytes(handCrafted.Canonical) + if d1 != d2 { + t.Errorf("mismatching canonical JSON") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go index 1b7b674a0396..c862dd8123b4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go +++ 
b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/sign.go @@ -31,8 +31,9 @@ func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { } return &SignedManifest{ - Manifest: *m, - Raw: pretty, + Manifest: *m, + all: pretty, + Canonical: p, }, nil } @@ -60,7 +61,8 @@ func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certifica } return &SignedManifest{ - Manifest: *m, - Raw: pretty, + Manifest: *m, + all: pretty, + Canonical: p, }, nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go index 60f8cda074a7..fa8daa56f55b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema1/verify.go @@ -10,7 +10,7 @@ import ( // Verify verifies the signature of the signed manifest returning the public // keys used during signing. func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { - js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") if err != nil { logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") return nil, err @@ -23,7 +23,7 @@ func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { // certificate pool returning the list of verified chains. Signatures without // an x509 chain are not checked. func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { - js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") if err != nil { return nil, err } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder.go new file mode 100644 index 000000000000..44b94eaae901 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder.go @@ -0,0 +1,77 @@ +package schema2 + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// builder is a type for constructing manifests. +type builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configJSON references + configJSON []byte + + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process. +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references. 
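A hedged sketch of the intended call sequence for the schema2 builder defined above (the `example` package, `buildSchema2` name, and parameter names are illustrative): the image configuration is handed over up front, layer descriptors are appended one by one, and Build publishes the configuration blob through the blob service if it is not already present.

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema2"
)

// buildSchema2 produces a schema2 manifest whose Config descriptor points at
// the configuration blob published by Build.
func buildSchema2(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	builder := schema2.NewManifestBuilder(bs, configJSON)
	for _, layer := range layers {
		if err := builder.AppendReference(layer); err != nil {
			return nil, err
		}
	}
	return builder.Build(ctx)
}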
+func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.layers)), + } + copy(m.Layers, mb.layers) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = MediaTypeConfig + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *builder) AppendReference(d distribution.Describable) error { + mb.layers = append(mb.layers, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *builder) References() []distribution.Descriptor { + return mb.layers +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder_test.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder_test.go new file mode 100644 index 000000000000..02ed401bf32f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/builder_test.go @@ -0,0 +1,210 @@ +package schema2 + +import ( + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: "application/octet-stream", + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestBuilder(t *testing.T) { + imgJSON := []byte(`{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": 
null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}`) + configDigest := digest.FromBytes(imgJSON) + + descriptors := []distribution.Descriptor{ + { + Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 5312, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + Size: 235231, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 639152, + MediaType: MediaTypeLayer, + }, + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + builder := NewManifestBuilder(bs, imgJSON) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + built, err := builder.Build(context.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the config was put in the blob store + _, err = bs.Stat(context.Background(), configDigest) + if err != nil { + t.Fatal("config was not put in the blob store") + } + + manifest := built.(*DeserializedManifest).Manifest + + if manifest.Versioned.SchemaVersion != 2 { + t.Fatal("SchemaVersion != 2") + } + + target := manifest.Target() + if target.Digest != configDigest { + t.Fatalf("unexpected digest in target: %s", target.Digest.String()) + } + if target.MediaType != MediaTypeConfig { + t.Fatalf("unexpected media type in target: %s", target.MediaType) + } + if target.Size != 3153 { + t.Fatalf("unexpected size in target: 
%d", target.Size) + } + + references := manifest.References() + + if !reflect.DeepEqual(references, descriptors) { + t.Fatal("References() does not match the descriptors added") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest.go new file mode 100644 index 000000000000..355b5ad4ea21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest.go @@ -0,0 +1,128 @@ +package schema2 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + + // MediaTypeConfig specifies the mediaType for the image configuration. + MediaTypeConfig = "application/vnd.docker.container.image.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the + // manifest. + MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" + + // MediaTypeForeignLayer is the mediaType used for layers that must be + // downloaded from foreign URLs. + MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifest, + } +) + +func init() { + schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` +} + +// References returnes the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + return m.Layers +} + +// Target returns the target of this signed manifest. +func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. 
+func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. +func (m DeserializedManifest) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest_test.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest_test.go new file mode 100644 index 000000000000..459d614cdde6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/schema2/manifest_test.go @@ -0,0 +1,105 @@ +package schema2 + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution" +) + +var expectedManifestSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 153263, + "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" + } + ] +}`) + +func TestManifest(t *testing.T) { + manifest := Manifest{ + Versioned: SchemaVersion, + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: MediaTypeConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", + Size: 153263, + MediaType: MediaTypeLayer, + }, + }, + } + + deserialized, err := FromStruct(manifest) + if err != nil { + t.Fatalf("error creating DeserializedManifest: %v", err) + } + + mediaType, canonical, err := deserialized.Payload() + + if mediaType != MediaTypeManifest { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + p, err := json.MarshalIndent(&manifest, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that canonical field matches expected value. 
+ if !bytes.Equal(expectedManifestSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestSerialization)) + } + + var unmarshalled DeserializedManifest + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + target := deserialized.Target() + if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" { + t.Fatalf("unexpected digest in target: %s", target.Digest.String()) + } + if target.MediaType != MediaTypeConfig { + t.Fatalf("unexpected media type in target: %s", target.MediaType) + } + if target.Size != 985 { + t.Fatalf("unexpected size in target: %d", target.Size) + } + + references := deserialized.References() + if len(references) != 1 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + if references[0].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" { + t.Fatalf("unexpected digest in reference: %s", references[0].Digest.String()) + } + if references[0].MediaType != MediaTypeLayer { + t.Fatalf("unexpected media type in reference: %s", references[0].MediaType) + } + if references[0].Size != 153263 { + t.Fatalf("unexpected size in reference: %d", references[0].Size) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go b/Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go index bef382925137..c57398bde644 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifest/versioned.go @@ -1,9 +1,12 @@ package manifest -// Versioned provides a struct with just the manifest schemaVersion. Incoming +// Versioned provides a struct with the manifest schemaVersion and . Incoming // content with unknown schema version can be decoded against this struct to // check the version. type Versioned struct { // SchemaVersion is the image manifest schema that this image follows SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/manifests.go b/Godeps/_workspace/src/github.com/docker/distribution/manifests.go new file mode 100644 index 000000000000..3bf912a6597e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/manifests.go @@ -0,0 +1,123 @@ +package distribution + +import ( + "fmt" + "mime" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// Manifest represents a registry object specifying a set of +// references and an optional target +type Manifest interface { + // References returns a list of objects which make up this manifest. + // The references are strictly ordered from base to head. A reference + // is anything which can be represented by a distribution.Descriptor + References() []Descriptor + + // Payload provides the serialized format of the manifest, in addition to + // the mediatype. + Payload() (mediatype string, payload []byte, err error) +} + +// ManifestBuilder creates a manifest allowing one to include dependencies. +// Instances can be obtained from a version-specific manifest package. 
Manifest +// specific data is passed into the function which creates the builder. +type ManifestBuilder interface { + // Build creates the manifest from his builder. + Build(ctx context.Context) (Manifest, error) + + // References returns a list of objects which have been added to this + // builder. The dependencies are returned in the order they were added, + // which should be from base to head. + References() []Descriptor + + // AppendReference includes the given object in the manifest after any + // existing dependencies. If the add fails, such as when adding an + // unsupported dependency, an error may be returned. + AppendReference(dependency Describable) error +} + +// ManifestService describes operations on image manifests. +type ManifestService interface { + // Exists returns true if the manifest exists. + Exists(ctx context.Context, dgst digest.Digest) (bool, error) + + // Get retrieves the manifest specified by the given digest + Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest + Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) + + // Delete removes the manifest specified by the given digest. Deleting + // a manifest that doesn't exist will return ErrManifestNotFound + Delete(ctx context.Context, dgst digest.Digest) error +} + +// ManifestEnumerator enables iterating over manifests +type ManifestEnumerator interface { + // Enumerate calls ingester for each manifest. + Enumerate(ctx context.Context, ingester func(digest.Digest) error) error +} + +// SignaturesGetter provides an interface for getting the signatures of a schema1 manifest. If the digest +// referred to is not a schema1 manifest, an error should be returned. +type SignaturesGetter interface { + GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error) +} + +// Describable is an interface for descriptors +type Describable interface { + Descriptor() Descriptor +} + +// ManifestMediaTypes returns the supported media types for manifests. +func ManifestMediaTypes() (mediaTypes []string) { + for t := range mappings { + if t != "" { + mediaTypes = append(mediaTypes, t) + } + } + return +} + +// UnmarshalFunc implements manifest unmarshalling a given MediaType +type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) + +var mappings = make(map[string]UnmarshalFunc, 0) + +// UnmarshalManifest looks up manifest unmarshal functions based on +// MediaType +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { + // Need to look up by the actual media type, not the raw contents of + // the header. Strip semicolons and anything following them. + var mediatype string + if ctHeader != "" { + var err error + mediatype, _, err = mime.ParseMediaType(ctHeader) + if err != nil { + return nil, Descriptor{}, err + } + } + + unmarshalFunc, ok := mappings[mediatype] + if !ok { + unmarshalFunc, ok = mappings[""] + if !ok { + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + } + } + + return unmarshalFunc(p) +} + +// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
This +// should be called from specific +func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { + if _, ok := mappings[mediatype]; ok { + return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) + } + mappings[mediatype] = u + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go index d4a3e1f6ef71..502288a40453 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge.go @@ -7,7 +7,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/uuid" ) @@ -23,8 +23,8 @@ var _ Listener = &bridge{} // URLBuilder defines a subset of url builder to be used by the event listener. type URLBuilder interface { - BuildManifestURL(name, tag string) (string, error) - BuildBlobURL(name string, dgst digest.Digest) (string, error) + BuildManifestURL(name reference.Named) (string, error) + BuildBlobURL(ref reference.Canonical) (string, error) } // NewBridge returns a notification listener that writes records to sink, @@ -53,31 +53,62 @@ func NewRequestRecord(id string, r *http.Request) RequestRecord { } } -func (b *bridge) ManifestPushed(repo string, sm *schema1.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionPush, repo, sm) +func (b *bridge) ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPush, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) } -func (b *bridge) ManifestPulled(repo string, sm *schema1.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionPull, repo, sm) +func (b *bridge) ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error { + manifestEvent, err := b.createManifestEvent(EventActionPull, repo, sm) + if err != nil { + return err + } + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + manifestEvent.Target.Tag = opt.Tag + break + } + } + return b.sink.Write(*manifestEvent) } -func (b *bridge) ManifestDeleted(repo string, sm *schema1.SignedManifest) error { - return b.createManifestEventAndWrite(EventActionDelete, repo, sm) +func (b *bridge) ManifestDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createManifestDeleteEventAndWrite(EventActionDelete, repo, dgst) } -func (b *bridge) BlobPushed(repo string, desc distribution.Descriptor) error { +func (b *bridge) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { return b.createBlobEventAndWrite(EventActionPush, repo, desc) } -func (b *bridge) BlobPulled(repo string, desc distribution.Descriptor) error { +func (b *bridge) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { return b.createBlobEventAndWrite(EventActionPull, repo, desc) } -func (b *bridge) BlobDeleted(repo string, desc distribution.Descriptor) error { - return 
b.createBlobEventAndWrite(EventActionDelete, repo, desc) +func (b *bridge) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { + event, err := b.createBlobEvent(EventActionMount, repo, desc) + if err != nil { + return err + } + event.Target.FromRepository = fromRepo.Name() + return b.sink.Write(*event) +} + +func (b *bridge) BlobDeleted(repo reference.Named, dgst digest.Digest) error { + return b.createBlobDeleteEventAndWrite(EventActionDelete, repo, dgst) } -func (b *bridge) createManifestEventAndWrite(action string, repo string, sm *schema1.SignedManifest) error { +func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { manifestEvent, err := b.createManifestEvent(action, repo, sm) if err != nil { return err @@ -86,24 +117,40 @@ func (b *bridge) createManifestEventAndWrite(action string, repo string, sm *sch return b.sink.Write(*manifestEvent) } -func (b *bridge) createManifestEvent(action string, repo string, sm *schema1.SignedManifest) (*Event, error) { +func (b *bridge) createManifestDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Repository = repo.Name() + event.Target.Digest = dgst + + return b.sink.Write(*event) +} + +func (b *bridge) createManifestEvent(action string, repo reference.Named, sm distribution.Manifest) (*Event, error) { event := b.createEvent(action) - event.Target.MediaType = schema1.ManifestMediaType - event.Target.Repository = repo + event.Target.Repository = repo.Name() + + mt, p, err := sm.Payload() + if err != nil { + return nil, err + } - p, err := sm.Payload() + // Ensure we have the canonical manifest descriptor here + _, desc, err := distribution.UnmarshalManifest(mt, p) if err != nil { return nil, err } - event.Target.Length = int64(len(p)) - event.Target.Size = int64(len(p)) - event.Target.Digest, err = digest.FromBytes(p) + event.Target.MediaType = mt + event.Target.Length = desc.Size + event.Target.Size = desc.Size + event.Target.Digest = desc.Digest + + ref, err := reference.WithDigest(repo, event.Target.Digest) if err != nil { return nil, err } - event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, event.Target.Digest.String()) + event.Target.URL, err = b.ub.BuildManifestURL(ref) if err != nil { return nil, err } @@ -111,7 +158,15 @@ func (b *bridge) createManifestEvent(action string, repo string, sm *schema1.Sig return event, nil } -func (b *bridge) createBlobEventAndWrite(action string, repo string, desc distribution.Descriptor) error { +func (b *bridge) createBlobDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { + event := b.createEvent(action) + event.Target.Digest = dgst + event.Target.Repository = repo.Name() + + return b.sink.Write(*event) +} + +func (b *bridge) createBlobEventAndWrite(action string, repo reference.Named, desc distribution.Descriptor) error { event, err := b.createBlobEvent(action, repo, desc) if err != nil { return err @@ -120,14 +175,18 @@ func (b *bridge) createBlobEventAndWrite(action string, repo string, desc distri return b.sink.Write(*event) } -func (b *bridge) createBlobEvent(action string, repo string, desc distribution.Descriptor) (*Event, error) { +func (b *bridge) createBlobEvent(action string, repo reference.Named, desc distribution.Descriptor) (*Event, error) { event := b.createEvent(action) event.Target.Descriptor = desc event.Target.Length = desc.Size - event.Target.Repository = repo + 
event.Target.Repository = repo.Name() + + ref, err := reference.WithDigest(repo, desc.Digest) + if err != nil { + return nil, err + } - var err error - event.Target.URL, err = b.ub.BuildBlobURL(repo, desc.Digest) + event.Target.URL, err = b.ub.BuildBlobURL(ref) if err != nil { return nil, err } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go index a291acb77a32..0f85791c8586 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/bridge_test.go @@ -3,14 +3,13 @@ package notifications import ( "testing" + "github.com/docker/distribution" "github.com/docker/distribution/digest" - - "github.com/docker/libtrust" - "github.com/docker/distribution/manifest/schema1" - + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/uuid" + "github.com/docker/libtrust" ) var ( @@ -21,7 +20,7 @@ var ( Addr: "remote.test", InstanceID: uuid.Generate().String(), } - ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/")) + ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/", false)) actor = ActorRecord{ Name: "test", @@ -38,14 +37,14 @@ var ( ) func TestEventBridgeManifestPulled(t *testing.T) { - l := createTestEnv(t, testSinkFn(func(events ...Event) error { checkCommonManifest(t, EventActionPull, events...) return nil })) - if err := l.ManifestPulled(repo, sm); err != nil { + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPulled(repoRef, sm); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } } @@ -57,19 +56,52 @@ func TestEventBridgeManifestPushed(t *testing.T) { return nil })) - if err := l.ManifestPushed(repo, sm); err != nil { + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPushed(repoRef, sm); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } } -func TestEventBridgeManifestDeleted(t *testing.T) { +func TestEventBridgeManifestPushedWithTag(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkCommonManifest(t, EventActionPush, events...) + if events[0].Target.Tag != "latest" { + t.Fatalf("missing or unexpected tag: %#v", events[0].Target) + } + + return nil + })) + + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPushed(repoRef, sm, distribution.WithTag(m.Tag)); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestPulledWithTag(t *testing.T) { l := createTestEnv(t, testSinkFn(func(events ...Event) error { - checkCommonManifest(t, EventActionDelete, events...) + checkCommonManifest(t, EventActionPull, events...) + if events[0].Target.Tag != "latest" { + t.Fatalf("missing or unexpected tag: %#v", events[0].Target) + } + + return nil + })) + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestPulled(repoRef, sm, distribution.WithTag(m.Tag)); err != nil { + t.Fatalf("unexpected error notifying manifest pull: %v", err) + } +} + +func TestEventBridgeManifestDeleted(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkDeleted(t, EventActionDelete, events...) 
return nil })) - if err := l.ManifestDeleted(repo, sm); err != nil { + repoRef, _ := reference.ParseNamed(repo) + if err := l.ManifestDeleted(repoRef, dgst); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } } @@ -85,17 +117,39 @@ func createTestEnv(t *testing.T, fn testSinkFn) Listener { t.Fatalf("error signing manifest: %v", err) } - payload, err = sm.Payload() - if err != nil { - t.Fatalf("error getting manifest payload: %v", err) + payload = sm.Canonical + dgst = digest.FromBytes(payload) + + return NewBridge(ub, source, actor, request, fn) +} + +func checkDeleted(t *testing.T, action string, events ...Event) { + if len(events) != 1 { + t.Fatalf("unexpected number of events: %v != 1", len(events)) } - dgst, err = digest.FromBytes(payload) - if err != nil { - t.Fatalf("error digesting manifest payload: %v", err) + event := events[0] + + if event.Source != source { + t.Fatalf("source not equal: %#v != %#v", event.Source, source) + } + + if event.Request != request { + t.Fatalf("request not equal: %#v != %#v", event.Request, request) + } + + if event.Actor != actor { + t.Fatalf("request not equal: %#v != %#v", event.Actor, actor) + } + + if event.Target.Digest != dgst { + t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst) + } + + if event.Target.Repository != repo { + t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo) } - return NewBridge(ub, source, actor, request, fn) } func checkCommonManifest(t *testing.T, action string, events ...Event) { @@ -106,13 +160,15 @@ func checkCommonManifest(t *testing.T, action string, events ...Event) { t.Fatalf("unexpected event action: %q != %q", event.Action, action) } - u, err := ub.BuildManifestURL(repo, dgst.String()) + repoRef, _ := reference.ParseNamed(repo) + ref, _ := reference.WithDigest(repoRef, dgst) + u, err := ub.BuildManifestURL(ref) if err != nil { t.Fatalf("error building expected url: %v", err) } if event.Target.URL != u { - t.Fatalf("incorrect url passed: %q != %q", event.Target.URL, u) + t.Fatalf("incorrect url passed: \n%q != \n%q", event.Target.URL, u) } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go index 97030026490c..b59a72bede5c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event.go @@ -11,6 +11,7 @@ import ( const ( EventActionPull = "pull" EventActionPush = "push" + EventActionMount = "mount" EventActionDelete = "delete" ) @@ -61,8 +62,15 @@ type Event struct { // Repository identifies the named repository. Repository string `json:"repository,omitempty"` + // FromRepository identifies the named repository which a blob was mounted + // from if appropriate. + FromRepository string `json:"fromRepository,omitempty"` + // URL provides a direct link to the content. URL string `json:"url,omitempty"` + + // Tag provides the tag + Tag string `json:"tag,omitempty"` } `json:"target,omitempty"` // Request covers the request that generated the event. 
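A minimal usage sketch of the schema2 ManifestBuilder vendored above, mirroring the flow in builder_test.go; the function and variable names (buildManifest, bs, configJSON, layerDescs) are illustrative placeholders, and bs is assumed to satisfy distribution.BlobService (for example a repository's Blobs(ctx)):

    package example

    import (
        "github.com/docker/distribution"
        "github.com/docker/distribution/context"
        "github.com/docker/distribution/manifest/schema2"
    )

    // buildManifest assembles a schema2 manifest from an image configuration
    // and a set of layer descriptors.
    func buildManifest(ctx context.Context, bs distribution.BlobService, configJSON []byte, layerDescs []distribution.Descriptor) (distribution.Manifest, error) {
        builder := schema2.NewManifestBuilder(bs, configJSON)
        for _, d := range layerDescs {
            // each descriptor is expected to reference a layer blob that
            // already exists in the blob store
            if err := builder.AppendReference(d); err != nil {
                return nil, err
            }
        }
        // Build stats the config blob, uploads it if it is not yet present,
        // and returns a *schema2.DeserializedManifest as a distribution.Manifest
        return builder.Build(ctx)
    }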
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go index 8aa797e665f8..0981a7ad44f0 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/event_test.go @@ -21,7 +21,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { "timestamp": "2006-01-02T15:04:05Z", "action": "push", "target": { - "mediaType": "application/vnd.docker.distribution.manifest.v1+json", + "mediaType": "application/vnd.docker.distribution.manifest.v1+prettyjws", "size": 1, "digest": "sha256:0123456789abcdef0", "length": 1, @@ -49,7 +49,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { "target": { "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", "size": 2, - "digest": "tarsum.v2+sha256:0123456789abcdef1", + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", "length": 2, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" @@ -75,7 +75,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { "target": { "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", "size": 3, - "digest": "tarsum.v2+sha256:0123456789abcdef2", + "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6", "length": 3, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" @@ -120,14 +120,14 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { manifestPush.Target.Digest = "sha256:0123456789abcdef0" manifestPush.Target.Length = 1 manifestPush.Target.Size = 1 - manifestPush.Target.MediaType = schema1.ManifestMediaType + manifestPush.Target.MediaType = schema1.MediaTypeSignedManifest manifestPush.Target.Repository = "library/test" manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" var layerPush0 Event layerPush0 = prototype layerPush0.ID = "asdf-asdf-asdf-asdf-1" - layerPush0.Target.Digest = "tarsum.v2+sha256:0123456789abcdef1" + layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" layerPush0.Target.Length = 2 layerPush0.Target.Size = 2 layerPush0.Target.MediaType = layerMediaType @@ -137,7 +137,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { var layerPush1 Event layerPush1 = prototype layerPush1.ID = "asdf-asdf-asdf-asdf-2" - layerPush1.Target.Digest = "tarsum.v2+sha256:0123456789abcdef2" + layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6" layerPush1.Target.Length = 3 layerPush1.Target.Size = 3 layerPush1.Target.MediaType = layerMediaType diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go index 6e10c62214f9..854dd404d672 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/http_test.go @@ -75,12 +75,12 @@ func TestHTTPSink(t *testing.T) { { statusCode: http.StatusOK, events: []Event{ - createTestEvent("push", "library/test", schema1.ManifestMediaType)}, + createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest)}, }, { statusCode: http.StatusOK, events: []Event{ - createTestEvent("push", "library/test", schema1.ManifestMediaType), + createTestEvent("push", 
"library/test", schema1.MediaTypeSignedManifest), createTestEvent("push", "library/test", layerMediaType), createTestEvent("push", "library/test", layerMediaType), }, diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go index 6c558a47152e..b99133dadca8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener.go @@ -3,33 +3,25 @@ package notifications import ( "net/http" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" ) // ManifestListener describes a set of methods for listening to events related to manifests. type ManifestListener interface { - ManifestPushed(repo string, sm *schema1.SignedManifest) error - ManifestPulled(repo string, sm *schema1.SignedManifest) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - ManifestDeleted(repo string, sm *schema1.SignedManifest) error + ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error + ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error + ManifestDeleted(repo reference.Named, dgst digest.Digest) error } // BlobListener describes a listener that can respond to layer related events. type BlobListener interface { - BlobPushed(repo string, desc distribution.Descriptor) error - BlobPulled(repo string, desc distribution.Descriptor) error - - // TODO(stevvooe): Please note that delete support is still a little shaky - // and we'll need to propagate these in the future. - - BlobDeleted(repo string, desc distribution.Descriptor) error + BlobPushed(repo reference.Named, desc distribution.Descriptor) error + BlobPulled(repo reference.Named, desc distribution.Descriptor) error + BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error + BlobDeleted(repo reference.Named, desc digest.Digest) error } // Listener combines all repository events into a single interface. 
@@ -74,38 +66,38 @@ type manifestServiceListener struct { parent *repositoryListener } -func (msl *manifestServiceListener) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { - sm, err := msl.ManifestService.Get(dgst) +func (msl *manifestServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := msl.ManifestService.Delete(ctx, dgst) if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { - logrus.Errorf("error dispatching manifest pull to listener: %v", err) + if err := msl.parent.listener.ManifestDeleted(msl.parent.Repository.Named(), dgst); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err) } } - return sm, err + return err } -func (msl *manifestServiceListener) Put(sm *schema1.SignedManifest) error { - err := msl.ManifestService.Put(sm) - +func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + sm, err := msl.ManifestService.Get(ctx, dgst) if err == nil { - if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Name(), sm); err != nil { - logrus.Errorf("error dispatching manifest push to listener: %v", err) + if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Named(), sm, options...); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err) } } - return err + return sm, err } -func (msl *manifestServiceListener) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - sm, err := msl.ManifestService.GetByTag(tag, options...) +func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + dgst, err := msl.ManifestService.Put(ctx, sm, options...) 
+ if err == nil { - if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { - logrus.Errorf("error dispatching manifest pull to listener: %v", err) + if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Named(), sm, options...); err != nil { + context.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err) } } - return sm, err + return dgst, err } type blobServiceListener struct { @@ -121,7 +113,7 @@ func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([] if desc, err := bsl.Stat(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } @@ -136,7 +128,7 @@ func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (d if desc, err := bsl.Stat(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } @@ -151,7 +143,7 @@ func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWr if desc, err := bsl.Stat(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { - if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { + if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } @@ -163,19 +155,37 @@ func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWr func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { desc, err := bsl.BlobStore.Put(ctx, mediaType, p) if err == nil { - if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Name(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Named(), desc); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) } } return desc, err } -func (bsl *blobServiceListener) Create(ctx context.Context) (distribution.BlobWriter, error) { - wr, err := bsl.BlobStore.Create(ctx) +func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + wr, err := bsl.BlobStore.Create(ctx, options...) 
+ switch err := err.(type) { + case distribution.ErrBlobMounted: + if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Named(), err.Descriptor, err.From); err != nil { + context.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) + } + return nil, err + } return bsl.decorateWriter(wr), err } +func (bsl *blobServiceListener) Delete(ctx context.Context, dgst digest.Digest) error { + err := bsl.BlobStore.Delete(ctx, dgst) + if err == nil { + if err := bsl.parent.listener.BlobDeleted(bsl.parent.Repository.Named(), dgst); err != nil { + context.GetLogger(ctx).Errorf("error dispatching layer delete to listener: %v", err) + } + } + + return err +} + func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { wr, err := bsl.BlobStore.Resume(ctx, id) return bsl.decorateWriter(wr), err @@ -196,7 +206,7 @@ type blobWriterListener struct { func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { committed, err := bwl.BlobWriter.Commit(ctx, desc) if err == nil { - if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Name(), committed); err != nil { + if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Named(), committed); err != nil { context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go index 04dae1184316..d16a4560b75b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/listener_test.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -27,7 +28,8 @@ func TestListener(t *testing.T) { ops: make(map[string]int), } - repository, err := registry.Repository(ctx, "foo/bar") + repoRef, _ := reference.ParseNamed("foo/bar") + repository, err := registry.Repository(ctx, repoRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } @@ -37,12 +39,12 @@ func TestListener(t *testing.T) { checkExerciseRepository(t, repository) expectedOps := map[string]int{ - "manifest:push": 1, - "manifest:pull": 2, - // "manifest:delete": 0, // deletes not supported for now - "layer:push": 2, - "layer:pull": 2, - // "layer:delete": 0, // deletes not supported for now + "manifest:push": 1, + "manifest:pull": 1, + "manifest:delete": 1, + "layer:push": 2, + "layer:pull": 2, + "layer:delete": 2, } if !reflect.DeepEqual(tl.ops, expectedOps) { @@ -55,33 +57,38 @@ type testListener struct { ops map[string]int } -func (tl *testListener) ManifestPushed(repo string, sm *schema1.SignedManifest) error { +func (tl *testListener) ManifestPushed(repo reference.Named, m distribution.Manifest, options ...distribution.ManifestServiceOption) error { tl.ops["manifest:push"]++ return nil } -func (tl *testListener) ManifestPulled(repo string, sm *schema1.SignedManifest) error { +func (tl *testListener) ManifestPulled(repo reference.Named, m distribution.Manifest, options 
...distribution.ManifestServiceOption) error { tl.ops["manifest:pull"]++ return nil } -func (tl *testListener) ManifestDeleted(repo string, sm *schema1.SignedManifest) error { +func (tl *testListener) ManifestDeleted(repo reference.Named, d digest.Digest) error { tl.ops["manifest:delete"]++ return nil } -func (tl *testListener) BlobPushed(repo string, desc distribution.Descriptor) error { +func (tl *testListener) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { tl.ops["layer:push"]++ return nil } -func (tl *testListener) BlobPulled(repo string, desc distribution.Descriptor) error { +func (tl *testListener) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { tl.ops["layer:pull"]++ return nil } -func (tl *testListener) BlobDeleted(repo string, desc distribution.Descriptor) error { +func (tl *testListener) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { + tl.ops["layer:mount"]++ + return nil +} + +func (tl *testListener) BlobDeleted(repo reference.Named, d digest.Digest) error { tl.ops["layer:delete"]++ return nil } @@ -93,16 +100,20 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository) { // takes the registry through a common set of operations. This could be // used to make cross-cutting updates by changing internals that affect // update counts. Basically, it would make writing tests a lot easier. + ctx := context.Background() tag := "thetag" + // todo: change this to use Builder + m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: repository.Name(), + Name: repository.Named().Name(), Tag: tag, } + var blobDigests []digest.Digest blobs := repository.Blobs(ctx) for i := 0; i < 2; i++ { rs, ds, err := testutil.CreateRandomTarFile() @@ -110,6 +121,7 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository) { t.Fatalf("error creating test layer: %v", err) } dgst := digest.Digest(ds) + blobDigests = append(blobDigests, dgst) wr, err := blobs.Create(ctx) if err != nil { @@ -158,35 +170,31 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository) { t.Fatal(err.Error()) } - if err = manifests.Put(sm); err != nil { + var digestPut digest.Digest + if digestPut, err = manifests.Put(ctx, sm); err != nil { t.Fatalf("unexpected error putting the manifest: %v", err) } - p, err := sm.Payload() - if err != nil { - t.Fatalf("unexpected error getting manifest payload: %v", err) + dgst := digest.FromBytes(sm.Canonical) + if dgst != digestPut { + t.Fatalf("mismatching digest from payload and put") } - dgst, err := digest.FromBytes(p) - if err != nil { - t.Fatalf("unexpected error digesting manifest payload: %v", err) - } - - fetchedByManifest, err := manifests.Get(dgst) + _, err = manifests.Get(ctx, dgst) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } - if fetchedByManifest.Tag != sm.Tag { - t.Fatalf("retrieved unexpected manifest: %v", err) - } - - fetched, err := manifests.GetByTag(tag) + err = manifests.Delete(ctx, dgst) if err != nil { - t.Fatalf("unexpected error fetching manifest: %v", err) + t.Fatalf("unexpected error deleting blob: %v", err) } - if fetched.Tag != fetchedByManifest.Tag { - t.Fatalf("retrieved unexpected manifest: %v", err) + for _, d := range blobDigests { + err = blobs.Delete(ctx, d) + if err != nil { + t.Fatalf("unexpected error deleting blob: %v", err) + } + } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go 
b/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go index 2a8ffcbd26f6..a20af168733c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/notifications/metrics.go @@ -34,7 +34,7 @@ func newSafeMetrics() *safeMetrics { } // httpStatusListener returns the listener for the http sink that updates the -// relevent counters. +// relevant counters. func (sm *safeMetrics) httpStatusListener() httpStatusListener { return &endpointMetricsHTTPStatusListener{ safeMetrics: sm, @@ -49,7 +49,7 @@ func (sm *safeMetrics) eventQueueListener() eventQueueListener { } // endpointMetricsHTTPStatusListener increments counters related to http sinks -// for the relevent events. +// for the relevant events. type endpointMetricsHTTPStatusListener struct { *safeMetrics } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile b/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile deleted file mode 100644 index 1e2a8471cfb4..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/project/dev-image/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM ubuntu:14.04 - -ENV GOLANG_VERSION 1.4rc1 -ENV GOPATH /var/cache/drone -ENV GOROOT /usr/local/go -ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin - -ENV LANG C -ENV LC_ALL C - -RUN apt-get update && apt-get install -y \ - wget ca-certificates git mercurial bzr \ - --no-install-recommends \ - && rm -rf /var/lib/apt/lists/* - -RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ - tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ - rm go${GOLANG_VERSION}.linux-amd64.tar.gz - -RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md deleted file mode 100644 index eda886966cb9..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Git Hooks -========= - -To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. - -As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh deleted file mode 100644 index 6afea8a13625..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/configure-hooks.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -cd $(dirname $0) - -REPO_ROOT=$(git rev-parse --show-toplevel) -RESOLVE_REPO_ROOT_STATUS=$? -if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then - echo -e "Unable to resolve repository root. 
Error:\n$REPO_ROOT" > /dev/stderr - exit $RESOLVE_REPO_ROOT_STATUS -fi - -set -e -set -x - -# Just in case the directory doesn't exist -mkdir -p $REPO_ROOT/.git/hooks - -ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commit \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit b/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit deleted file mode 100644 index 3ee2e913fa06..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/project/hooks/pre-commit +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh - -REPO_ROOT=$(git rev-parse --show-toplevel) -RESOLVE_REPO_ROOT_STATUS=$? -if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then - printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr - exit $RESOLVE_REPO_ROOT_STATUS -fi - -cd $REPO_ROOT - -GOFMT_ERRORS=$(gofmt -s -l . 2>&1) -if [ -n "$GOFMT_ERRORS" ]; then - printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr - exit 1 -fi - -GOLINT_ERRORS=$(golint ./... 2>&1) -if [ -n "$GOLINT_ERRORS" ]; then - printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr - exit 1 -fi - -GOVET_ERRORS=$(go vet ./... 2>&1) -GOVET_STATUS=$? -if [ "$GOVET_STATUS" -ne "0" ]; then - printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr - exit $GOVET_STATUS -fi diff --git a/Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go b/Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go index c3e77c6e83ae..bb09fa25dae2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/reference/reference.go @@ -3,10 +3,10 @@ // // Grammar // -// reference := repository [ ":" tag ] [ "@" digest ] +// reference := name [ ":" tag ] [ "@" digest ] // name := [hostname '/'] component ['/' component]* // hostname := hostcomponent ['.' hostcomponent]* [':' port-number] -// hostcomponent := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ +// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ @@ -46,8 +46,7 @@ var ( // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") - // ErrNameTooLong is returned when a repository name is longer than - // RepositoryNameTotalLengthMax + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) ) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go b/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go index a4ffe5b642f0..9a7d366bc8a8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp.go @@ -22,7 +22,7 @@ var ( // hostnameComponentRegexp restricts the registry hostname component of a // repository name to start with a component as defined by hostnameRegexp // and followed by an optional port. 
- hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) + hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) // hostnameRegexp defines the structure of potential hostname components // that may be part of image names. This is purposely a subset of what is @@ -49,7 +49,7 @@ var ( // NameRegexp is the format for the name component of references. The // regexp has capturing groups for the hostname and name part omitting - // the seperating forward slash from either. + // the separating forward slash from either. NameRegexp = expression( optional(hostnameRegexp, literal(`/`)), nameComponentRegexp, diff --git a/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp_test.go b/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp_test.go index 3394491875c4..2ec39377af82 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/reference/regexp_test.go @@ -111,6 +111,10 @@ func TestHostRegexp(t *testing.T) { input: "xn--n3h.com", // ☃.com in punycode match: true, }, + { + input: "Asdf.com", // uppercase character + match: true, + }, } r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`) for i := range hostcases { @@ -399,6 +403,14 @@ func TestFullNameRegexp(t *testing.T) { match: true, subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, }, + { + input: "Asdf.com/foo/bar", // uppercase character in hostname + match: true, + }, + { + input: "Foo/FarB", // uppercase characters in remote name + match: false, + }, } for i := range testcases { checkRegexp(t, anchoredNameRegexp, testcases[i]) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry.go index 9316567c8085..1ede31ebb63c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry.go @@ -2,8 +2,7 @@ package distribution import ( "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" ) // Scope defines the set of items that match a namespace. @@ -34,7 +33,7 @@ type Namespace interface { // Repository should return a reference to the named repository. The // registry may or may not have the repository but should always return a // reference. - Repository(ctx context.Context, name string) (Repository, error) + Repository(ctx context.Context, name reference.Named) (Repository, error) // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories // up to the size of 'repos' and returns the value 'n' for the number of entries @@ -42,17 +41,41 @@ type Namespace interface { // set to io.EOF if there are no more entries to obtain. Repositories(ctx context.Context, repos []string, last string) (n int, err error) - // Blobs returns a reference to registry's blob service. 
- Blobs() BlobService + // Blobs returns a blob enumerator to access all blobs + Blobs() BlobEnumerator + + // BlobStatter returns a BlobStatter to control + BlobStatter() BlobStatter +} + +// RepositoryEnumerator describes an operation to enumerate repositories +type RepositoryEnumerator interface { + Enumerate(ctx context.Context, ingester func(string) error) error } // ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption func(ManifestService) error +type ManifestServiceOption interface { + Apply(ManifestService) error +} + +// WithTag allows a tag to be passed into Put +func WithTag(tag string) ManifestServiceOption { + return WithTagOption{tag} +} + +// WithTagOption holds a tag +type WithTagOption struct{ Tag string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithTagOption) Apply(m ManifestService) error { + // no implementation + return nil +} // Repository is a named collection of manifests and layers. type Repository interface { - // Name returns the name of the repository. - Name() string + // Named returns the name of the repository. + Named() reference.Named // Manifests returns a reference to this repository's manifest service. // with the supplied options applied. @@ -65,69 +88,10 @@ type Repository interface { // be a BlobService for use with clients. This will allow such // implementations to avoid implementing ServeBlob. - // Signatures returns a reference to this repository's signatures service. - Signatures() SignatureService + // Tags returns a reference to this repositories tag service + Tags(ctx context.Context) TagService } // TODO(stevvooe): Must add close methods to all these. May want to change the // way instances are created to better reflect internal dependency // relationships. - -// ManifestService provides operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(dgst digest.Digest) (bool, error) - - // Get retrieves the identified by the digest, if it exists. - Get(dgst digest.Digest) (*schema1.SignedManifest, error) - - // Enumerate returns an array of manifest revisions in repository. - Enumerate() ([]digest.Digest, error) - - // Delete removes the manifest, if it exists. - Delete(dgst digest.Digest) error - - // Put creates or updates the manifest. - Put(manifest *schema1.SignedManifest) error - - // TODO(stevvooe): The methods after this message should be moved to a - // discrete TagService, per active proposals. - - // Tags lists the tags under the named repository. - Tags() ([]string, error) - - // ExistsByTag returns true if the manifest exists. - ExistsByTag(tag string) (bool, error) - - // GetByTag retrieves the named manifest, if it exists. - GetByTag(tag string, options ...ManifestServiceOption) (*schema1.SignedManifest, error) - - // TODO(stevvooe): There are several changes that need to be done to this - // interface: - // - // 1. Allow explicit tagging with Tag(digest digest.Digest, tag string) - // 2. Support reading tags with a re-entrant reader to avoid large - // allocations in the registry. - // 3. Long-term: Provide All() method that lets one scroll through all of - // the manifest entries. - // 4. Long-term: break out concept of signing from manifests. This is - // really a part of the distribution sprint. - // 5. Long-term: Manifest should be an interface. This code shouldn't - // really be concerned with the storage format. -} - -// SignatureService provides operations on signatures. 
-type SignatureService interface { - // Get retrieves all of the signature blobs for the specified digest. - Get(dgst digest.Digest) ([][]byte, error) - - // Enumerate retrieves all signature digests for given manifest revision, - // if it exists. - Enumerate(dgst digest.Digest) ([]digest.Digest, error) - - // Put stores the signature for the provided digest. - Put(dgst digest.Digest, signatures ...[]byte) error - - // Delete removes all signature links of particular manifest revision. - Delete(revision digest.Digest) error -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go index fdaddbcf8e66..6d9bb4b62afb 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors.go @@ -25,7 +25,8 @@ func (ec ErrorCode) ErrorCode() ErrorCode { // Error returns the ID/Value func (ec ErrorCode) Error() string { - return ec.Descriptor().Value + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) } // Descriptor returns the descriptor for the error code. @@ -68,6 +69,15 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { return nil } +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately func (ec ErrorCode) WithDetail(detail interface{}) Error { @@ -104,9 +114,7 @@ func (e Error) ErrorCode() ErrorCode { // Error returns a human readable representation of the error. func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) } // WithDetail will return a new Error, based on the current one, but with diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go index 27fb1cec7aab..54e7a736df80 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/errors_test.go @@ -4,9 +4,33 @@ import ( "encoding/json" "net/http" "reflect" + "strings" "testing" ) +// TestErrorsManagement does a quick check of the Errors type to ensure that +// members are properly pushed and marshaled. +var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{ + Value: "TEST1", + Message: "test error 1", + Description: `Just a test message #1.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{ + Value: "TEST2", + Message: "test error 2", + Description: `Just a test message #2.`, + HTTPStatusCode: http.StatusNotFound, +}) + +var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{ + Value: "TEST3", + Message: "Sorry %q isn't valid", + Description: `Just a test message #3.`, + HTTPStatusCode: http.StatusNotFound, +}) + // TestErrorCodes ensures that error code format, mappings and // marshaling/unmarshaling. round trips are stable. 
func TestErrorCodes(t *testing.T) { @@ -56,33 +80,15 @@ func TestErrorCodes(t *testing.T) { if ecUnmarshaled != ec { t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) } + + expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1)) + if ec.Error() != expectedErrorString { + t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString) + } } } -// TestErrorsManagement does a quick check of the Errors type to ensure that -// members are properly pushed and marshaled. -var ErrorCodeTest1 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST1", - Message: "test error 1", - Description: `Just a test message #1.`, - HTTPStatusCode: http.StatusInternalServerError, -}) - -var ErrorCodeTest2 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST2", - Message: "test error 2", - Description: `Just a test message #2.`, - HTTPStatusCode: http.StatusNotFound, -}) - -var ErrorCodeTest3 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST3", - Message: "Sorry %q isn't valid", - Description: `Just a test message #3.`, - HTTPStatusCode: http.StatusNotFound, -}) - func TestErrorsManagement(t *testing.T) { var errs Errors diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go index 01c34384b3b4..7489e84f7d65 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/errcode/register.go @@ -63,6 +63,19 @@ var ( Description: "Returned when a service is not available", HTTPStatusCode: http.StatusServiceUnavailable, }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. + ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + // FIXME: go1.5 doesn't export http.StatusTooManyRequests while + // go1.6 does. Update the hardcoded value to the constant once + // Docker updates golang version to 1.6. + HTTPStatusCode: 429, + }) ) var nextCode = 1000 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go index 7eba362af467..582799948a1f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -271,7 +271,7 @@ type MethodDescriptor struct { // RequestDescriptor per API use case. type RequestDescriptor struct { // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particalar request. + // to provide quick context for the particular request. Name string // Description should cover the requests purpose, covering any details for @@ -303,14 +303,14 @@ type RequestDescriptor struct { // ResponseDescriptor describes the components of an API response. type ResponseDescriptor struct { // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particalar response. + // to provide quick context for the particular response. 
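A short sketch of how the new WithMessage helper can pair with the ErrorCodeTooManyRequests registration above; the message text is invented, and errcode.ServeJSON is assumed to be the package's JSON response helper:

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// tooManyRequests writes the newly registered 429 code with an overridden message.
func tooManyRequests(w http.ResponseWriter) error {
	return errcode.ServeJSON(w, errcode.ErrorCodeTooManyRequests.WithMessage("too many requests, please retry later"))
}

func main() {}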
Name string // Description should provide a brief overview of the role of the // response. Description string - // StatusCode specifies the status recieved by this particular response. + // StatusCode specifies the status received by this particular response. StatusCode int // Headers covers any headers that may be returned from the response. @@ -495,7 +495,7 @@ var routeDescriptors = []RouteDescriptor{ Methods: []MethodDescriptor{ { Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ @@ -514,7 +514,7 @@ var routeDescriptors = []RouteDescriptor{ digestHeader, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "", Format: manifestBody, }, }, @@ -553,7 +553,7 @@ var routeDescriptors = []RouteDescriptor{ referenceParameterDescriptor, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "", Format: manifestBody, }, Successes: []ResponseDescriptor{ @@ -1041,6 +1041,70 @@ var routeDescriptors = []RouteDescriptor{ deniedResponseDescriptor, }, }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, }, }, }, diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go index ece52a2cd090..97d6923aa032 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/errors.go @@ -84,7 +84,7 @@ var ( }) // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verfication. + // signature verification. 
ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go index f6379977093a..f632d981c0f5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/routes_test.go @@ -87,14 +87,6 @@ func TestRouter(t *testing.T) { "name": "docker.com/foo/bar/baz", }, }, - { - RouteName: RouteNameBlob, - RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", - Vars: map[string]string{ - "name": "foo/bar", - "digest": "tarsum.dev+foo:abcdef0919234", - }, - }, { RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go index 4297439400b4..a959aaa89713 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls.go @@ -5,7 +5,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/gorilla/mux" ) @@ -17,33 +17,35 @@ import ( // under "/foo/v2/...". Most application will only provide a schema, host and // port, such as "https://localhost:5000/". type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router + root *url.URL // url root (ie http://localhost/) + router *mux.Router + relative bool } // NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL) *URLBuilder { +func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { return &URLBuilder{ - root: root, - router: Router(), + root: root, + router: Router(), + relative: relative, } } // NewURLBuilderFromString workes identically to NewURLBuilder except it takes // a string argument for the root, returning an error if it is not a valid // url. -func NewURLBuilderFromString(root string) (*URLBuilder, error) { +func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { u, err := url.Parse(root) if err != nil { return nil, err } - return NewURLBuilder(u), nil + return NewURLBuilder(u, relative), nil } // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root url. -func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { +func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { var scheme string forwardedProto := r.Header.Get("X-Forwarded-Proto") @@ -85,7 +87,7 @@ func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { u.Path = requestPath[0 : index+1] } - return NewURLBuilder(u) + return NewURLBuilder(u, relative) } // BuildBaseURL constructs a base url for the API, typically just "/v2/". @@ -113,10 +115,10 @@ func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { } // BuildTagsURL constructs a url to list the tags in the named repository. 
-func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { +func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { route := ub.cloneRoute(RouteNameTags) - tagsURL, err := route.URL("name", name) + tagsURL, err := route.URL("name", name.Name()) if err != nil { return "", err } @@ -126,10 +128,18 @@ func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { // BuildManifestURL constructs a url for the manifest identified by name and // reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { +func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { route := ub.cloneRoute(RouteNameManifest) - manifestURL, err := route.URL("name", name, "reference", reference) + tagOrDigest := "" + switch v := ref.(type) { + case reference.Tagged: + tagOrDigest = v.Tag() + case reference.Digested: + tagOrDigest = v.Digest().String() + } + + manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) if err != nil { return "", err } @@ -138,10 +148,10 @@ func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { } // BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { +func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { route := ub.cloneRoute(RouteNameBlob) - layerURL, err := route.URL("name", name, "digest", dgst.String()) + layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) if err != nil { return "", err } @@ -151,10 +161,10 @@ func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, err // BuildBlobUploadURL constructs a url to begin a blob upload in the // repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { +func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) - uploadURL, err := route.URL("name", name) + uploadURL, err := route.URL("name", name.Name()) if err != nil { return "", err } @@ -166,10 +176,10 @@ func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (str // including any url values. This should generally not be used by clients, as // this url is provided by server implementations during the blob upload // process. 
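A minimal sketch of the reference-typed builder API above, assuming a hypothetical registry root (registry.example.com) and image name:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	// relative=false resolves routes against the root; true returns only the path portion.
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com/", false)
	if err != nil {
		panic(err)
	}

	named, err := reference.ParseNamed("library/alpine")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(named, "3.3")
	if err != nil {
		panic(err)
	}

	u, err := ub.BuildManifestURL(tagged)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // expected: https://registry.example.com/v2/library/alpine/manifests/3.3
}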
-func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { +func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUploadChunk) - uploadURL, err := route.URL("name", name, "uuid", uuid) + uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) if err != nil { return "", err } @@ -186,12 +196,13 @@ func (ub *URLBuilder) cloneRoute(name string) clonedRoute { *route = *ub.router.GetRoute(name) // clone the route *root = *ub.root - return clonedRoute{Route: route, root: root} + return clonedRoute{Route: route, root: root, relative: ub.relative} } type clonedRoute struct { *mux.Route - root *url.URL + root *url.URL + relative bool } func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { @@ -200,11 +211,17 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { return nil, err } + if cr.relative { + return routeURL, nil + } + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { routeURL.Path = routeURL.Path[1:] } - return cr.root.ResolveReference(routeURL), nil + url := cr.root.ResolveReference(routeURL) + url.Scheme = cr.root.Scheme + return url, nil } // appendValuesURL appends the parameters to the url. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go index fdcfc31a2f5a..10aadd52edb4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/api/v2/urls_test.go @@ -4,6 +4,8 @@ import ( "net/http" "net/url" "testing" + + "github.com/docker/distribution/reference" ) type urlBuilderTestCase struct { @@ -13,6 +15,7 @@ type urlBuilderTestCase struct { } func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + fooBarRef, _ := reference.ParseNamed("foo/bar") return []urlBuilderTestCase{ { description: "test base url", @@ -23,37 +26,39 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { description: "test tags url", expectedPath: "/v2/foo/bar/tags/list", build: func() (string, error) { - return urlBuilder.BuildTagsURL("foo/bar") + return urlBuilder.BuildTagsURL(fooBarRef) }, }, { description: "test manifest url", expectedPath: "/v2/foo/bar/manifests/tag", build: func() (string, error) { - return urlBuilder.BuildManifestURL("foo/bar", "tag") + ref, _ := reference.WithTag(fooBarRef, "tag") + return urlBuilder.BuildManifestURL(ref) }, }, { description: "build blob url", - expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", + expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", build: func() (string, error) { - return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") + ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") + return urlBuilder.BuildBlobURL(ref) }, }, { description: "build blob upload url", expectedPath: "/v2/foo/bar/blobs/uploads/", build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar") + return urlBuilder.BuildBlobUploadURL(fooBarRef) }, }, { description: "build blob upload url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + expectedPath: 
"/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ + return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) }, }, @@ -61,16 +66,16 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { description: "build blob upload chunk url", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") }, }, { description: "build blob upload chunk url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) }, }, @@ -87,25 +92,31 @@ func TestURLBuilder(t *testing.T) { "https://localhost:5443", } - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) + t.Fatalf("unexpected error creating urlbuilder: %v", err) } - expectedURL := root + testCase.expectedPath + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root + expectedURL + } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } } } } + doTest(true) + doTest(false) } func TestURLBuilderWithPrefix(t *testing.T) { @@ -116,25 +127,31 @@ func TestURLBuilderWithPrefix(t *testing.T) { "https://localhost:5443/prefix/", } - for _, root := range roots { - urlBuilder, err := NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range makeURLBuilderTestCases(urlBuilder) { - url, err := testCase.build() + doTest := func(relative bool) { + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root, relative) if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) + t.Fatalf("unexpected error creating urlbuilder: %v", err) } - expectedURL := root[0:len(root)-1] + testCase.expectedPath + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := 
testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + expectedURL := testCase.expectedPath + if !relative { + expectedURL = root[0:len(root)-1] + expectedURL + } + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } } } } + doTest(true) + doTest(false) } type builderFromRequestTestCase struct { @@ -166,6 +183,11 @@ func TestBuilderFromRequest(t *testing.T) { request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com", }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com", + }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com", @@ -187,28 +209,48 @@ func TestBuilderFromRequest(t *testing.T) { }, }, } - - for _, tr := range testRequests { - var builder *URLBuilder - if tr.configHost.Scheme != "" && tr.configHost.Host != "" { - builder = NewURLBuilder(&tr.configHost) - } else { - builder = NewURLBuilderFromRequest(tr.request) - } - - for _, testCase := range makeURLBuilderTestCases(builder) { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) + doTest := func(relative bool) { + for _, tr := range testRequests { + var builder *URLBuilder + if tr.configHost.Scheme != "" && tr.configHost.Host != "" { + builder = NewURLBuilder(&tr.configHost, relative) + } else { + builder = NewURLBuilderFromRequest(tr.request, relative) } - expectedURL := tr.base + testCase.expectedPath + for _, testCase := range makeURLBuilderTestCases(builder) { + buildURL, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = testCase.expectedPath + if !relative { + expectedURL = tr.base + expectedURL + } + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = testCase.expectedPath + if !relative { + expectedURL = urlBase.String() + expectedURL + } + } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) + } } } } + doTest(true) + doTest(false) } func TestBuilderFromRequestWithPrefix(t *testing.T) { @@ -229,6 +271,11 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com/prefix/", }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com/prefix/", + }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com/prefix/", @@ -244,24 +291,43 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { }, } + var relative bool for _, tr := range testRequests { var builder *URLBuilder if tr.configHost.Scheme != "" && tr.configHost.Host != "" { - builder = NewURLBuilder(&tr.configHost) + builder = NewURLBuilder(&tr.configHost, false) } else { - builder = NewURLBuilderFromRequest(tr.request) + builder = NewURLBuilderFromRequest(tr.request, false) } for _, testCase := range makeURLBuilderTestCases(builder) { - url, err := testCase.build() + buildURL, err := testCase.build() if err != nil { t.Fatalf("%s: 
error building url: %v", testCase.description, err) } - expectedURL := tr.base[0:len(tr.base)-1] + testCase.expectedPath + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = testCase.expectedPath + if !relative { + expectedURL = tr.base[0:len(tr.base)-1] + expectedURL + } + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = testCase.expectedPath + if !relative { + expectedURL = urlBase.String()[0:len(urlBase.String())-1] + expectedURL + } + + } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) } } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go index b3bb580d2263..0ba2eba3e71b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go @@ -39,6 +39,16 @@ import ( "github.com/docker/distribution/context" ) +const ( + // UserKey is used to get the user object from + // a user context + UserKey = "auth.user" + + // UserNameKey is used to get the user name from + // a user context + UserNameKey = "auth.user.name" +) + // UserInfo carries information about // an autenticated/authorized client. type UserInfo struct { @@ -102,9 +112,9 @@ type userInfoContext struct { func (uic userInfoContext) Value(key interface{}) interface{} { switch key { - case "auth.user": + case UserKey: return uic.user - case "auth.user.name": + case UserNameKey: return uic.user.Name } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go index 82d3556dcce0..6e7ba18096d8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go @@ -20,7 +20,7 @@ var ( ErrInvalidCredential = errors.New("invalid authorization credential") // ErrAuthenticationFailure returned when authentication failure to be presented to agent. 
- ErrAuthenticationFailure = errors.New("authentication failured") + ErrAuthenticationFailure = errors.New("authentication failure") ) type accessController struct { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go index db0405475819..553f05cf9a55 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go @@ -56,7 +56,7 @@ func TestBasicAccessController(t *testing.T) { } } - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) if !ok { t.Fatal("basic accessController did not set auth.user context") } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go deleted file mode 100644 index 2b801d946e18..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package silly provides a simple authentication scheme that checks for the -// existence of an Authorization header and issues access if is present and -// non-empty. -// -// This package is present as an example implementation of a minimal -// auth.AccessController and for testing. This is not suitable for any kind of -// production security. -package silly - -import ( - "fmt" - "net/http" - "strings" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -// accessController provides a simple implementation of auth.AccessController -// that simply checks for a non-empty Authorization header. It is useful for -// demonstration and testing. -type accessController struct { - realm string - service string -} - -var _ auth.AccessController = &accessController{} - -func newAccessController(options map[string]interface{}) (auth.AccessController, error) { - realm, present := options["realm"] - if _, ok := realm.(string); !present || !ok { - return nil, fmt.Errorf(`"realm" must be set for silly access controller`) - } - - service, present := options["service"] - if _, ok := service.(string); !present || !ok { - return nil, fmt.Errorf(`"service" must be set for silly access controller`) - } - - return &accessController{realm: realm.(string), service: service.(string)}, nil -} - -// Authorized simply checks for the existence of the authorization header, -// responding with a bearer challenge if it doesn't exist. -func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) - if err != nil { - return nil, err - } - - if req.Header.Get("Authorization") == "" { - challenge := challenge{ - realm: ac.realm, - service: ac.service, - } - - if len(accessRecords) > 0 { - var scopes []string - for _, access := range accessRecords { - scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) - } - challenge.scope = strings.Join(scopes, " ") - } - - return nil, &challenge - } - - return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil -} - -type challenge struct { - realm string - service string - scope string -} - -var _ auth.Challenge = challenge{} - -// SetHeaders sets a simple bearer challenge on the response. 
-func (ch challenge) SetHeaders(w http.ResponseWriter) { - header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) - - if ch.scope != "" { - header = fmt.Sprintf("%s,scope=%q", header, ch.scope) - } - - w.Header().Set("WWW-Authenticate", header) -} - -func (ch challenge) Error() string { - return fmt.Sprintf("silly authentication challenge: %#v", ch) -} - -// init registers the silly auth backend. -func init() { - auth.Register("silly", auth.InitFunc(newAccessController)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go deleted file mode 100644 index ff2155b1873f..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package silly - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/auth" -) - -func TestSillyAccessController(t *testing.T) { - ac := &accessController{ - realm: "test-realm", - service: "test-service", - } - - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithValue(nil, "http.request", r) - authCtx, err := ac.Authorized(ctx) - if err != nil { - switch err := err.(type) { - case auth.Challenge: - err.SetHeaders(w) - w.WriteHeader(http.StatusUnauthorized) - return - default: - t.Fatalf("unexpected error authorizing request: %v", err) - } - } - - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) - if !ok { - t.Fatal("silly accessController did not set auth.user context") - } - - if userInfo.Name != "silly" { - t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) - } - - w.WriteHeader(http.StatusNoContent) - })) - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should not be authorized - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) - } - - req, err := http.NewRequest("GET", server.URL, nil) - if err != nil { - t.Fatalf("unexpected error creating new request: %v", err) - } - req.Header.Set("Authorization", "seriously, anything") - - resp, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("unexpected error during GET: %v", err) - } - defer resp.Body.Close() - - // Request should not be authorized - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go index 166816eeaa3d..2598f362a1f9 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go @@ -52,11 +52,11 @@ type ClaimSet struct { // Header describes the header section of a JSON Web Token. 
type Header struct { - Type string `json:"typ"` - SigningAlg string `json:"alg"` - KeyID string `json:"kid,omitempty"` - X5c []string `json:"x5c,omitempty"` - RawJWK json.RawMessage `json:"jwk,omitempty"` + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK *json.RawMessage `json:"jwk,omitempty"` } // Token describes a JSON Web Token. @@ -193,7 +193,7 @@ func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust. switch { case len(x5c) > 0: signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) - case len(rawJWK) > 0: + case rawJWK != nil: signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) case len(keyID) > 0: signingKey = verifyOpts.TrustedKeys[keyID] @@ -266,8 +266,8 @@ func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtru return } -func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { - pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) +func parseAndVerifyRawJWK(rawJWK *json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(*rawJWK)) if err != nil { return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go index 119aa738adbe..827dbbd7571b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go @@ -94,10 +94,11 @@ func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.Publi func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { signingKey, err := makeSigningKeyWithChain(rootKey, depth) if err != nil { - return nil, fmt.Errorf("unable to amke signing key with chain: %s", err) + return nil, fmt.Errorf("unable to make signing key with chain: %s", err) } - rawJWK, err := signingKey.PublicKey().MarshalJSON() + var rawJWK json.RawMessage + rawJWK, err = signingKey.PublicKey().MarshalJSON() if err != nil { return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) } @@ -105,7 +106,7 @@ func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey l joseHeader := &Header{ Type: "JWT", SigningAlg: "ES256", - RawJWK: json.RawMessage(rawJWK), + RawJWK: &rawJWK, } now := time.Now() @@ -375,7 +376,7 @@ func TestAccessController(t *testing.T) { t.Fatalf("accessController returned unexpected error: %s", err) } - userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + userInfo, ok := authCtx.Value(auth.UserKey).(auth.UserInfo) if !ok { t.Fatal("token accessController did not set auth.user context") } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go index a6ad45d85439..c8cd83bb976a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge.go @@ -25,7 +25,7 @@ type Challenge struct { type ChallengeManager interface { // GetChallenges returns the challenges 
for the given // endpoint URL. - GetChallenges(endpoint string) ([]Challenge, error) + GetChallenges(endpoint url.URL) ([]Challenge, error) // AddResponse adds the response to the challenge // manager. The challenges will be parsed out of @@ -48,8 +48,10 @@ func NewSimpleChallengeManager() ChallengeManager { type simpleChallengeManager map[string][]Challenge -func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) { - challenges := m[endpoint] +func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + endpoint.Host = strings.ToLower(endpoint.Host) + + challenges := m[endpoint.String()] return challenges, nil } @@ -60,11 +62,10 @@ func (m simpleChallengeManager) AddResponse(resp *http.Response) error { } urlCopy := url.URL{ Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, + Host: strings.ToLower(resp.Request.URL.Host), Scheme: resp.Request.URL.Scheme, } m[urlCopy.String()] = challenges - return nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go index 9b6a5adc9d2b..953ed5b4dc06 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/authchallenge_test.go @@ -1,7 +1,10 @@ package auth import ( + "fmt" "net/http" + "net/url" + "strings" "testing" ) @@ -36,3 +39,43 @@ func TestAuthChallengeParse(t *testing.T) { } } + +func TestAuthChallengeNormalization(t *testing.T) { + testAuthChallengeNormalization(t, "reg.EXAMPLE.com") + testAuthChallengeNormalization(t, "bɿɒʜɔiɿ-ɿɘƚƨim-ƚol-ɒ-ƨʞnɒʜƚ.com") +} + +func testAuthChallengeNormalization(t *testing.T, host string) { + + scm := NewSimpleChallengeManager() + + url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) + if err != nil { + t.Fatal(err) + } + + resp := &http.Response{ + Request: &http.Request{ + URL: url, + }, + Header: make(http.Header), + StatusCode: http.StatusUnauthorized, + } + resp.Header.Add("WWW-Authenticate", fmt.Sprintf("Bearer realm=\"https://%s/token\",service=\"registry.example.com\"", host)) + + err = scm.AddResponse(resp) + if err != nil { + t.Fatal(err) + } + + lowered := *url + lowered.Host = strings.ToLower(lowered.Host) + c, err := scm.GetChallenges(lowered) + if err != nil { + t.Fatal(err) + } + + if len(c) == 0 { + t.Fatal("Expected challenge for lower-cased-host URL") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go index 6c92fc343e1e..f3497b17ad2f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session.go @@ -15,6 +15,18 @@ import ( "github.com/docker/distribution/registry/client/transport" ) +var ( + // ErrNoBasicAuthCredentials is returned if a request can't be authorized with + // basic auth due to lack of credentials. + ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") + + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. 
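A small sketch of the lower-cased host matching introduced in the challenge manager above, modeled on the normalization test and using a hypothetical mixed-case registry host:

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	cm := auth.NewSimpleChallengeManager()

	// A synthetic 401 ping response from a mixed-case, hypothetical registry host.
	reqURL, _ := url.Parse("https://Registry.Example.com/v2/")
	resp := &http.Response{
		Request:    &http.Request{URL: reqURL},
		Header:     make(http.Header),
		StatusCode: http.StatusUnauthorized,
	}
	resp.Header.Add("WWW-Authenticate", `Bearer realm="https://registry.example.com/token",service="registry.example.com"`)
	if err := cm.AddResponse(resp); err != nil {
		panic(err)
	}

	// GetChallenges now takes a url.URL; hosts are matched case-insensitively.
	ping := url.URL{Scheme: "https", Host: "registry.example.com", Path: "/v2/"}
	challenges, err := cm.GetChallenges(ping)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(challenges)) // expected: 1
}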
+ ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +const defaultClientID = "registry-client" + // AuthenticationHandler is an interface for authorizing a request from // params from a "WWW-Authenicate" header for a single scheme. type AuthenticationHandler interface { @@ -32,6 +44,14 @@ type AuthenticationHandler interface { type CredentialStore interface { // Basic returns basic auth for the given URL Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) } // NewAuthorizer creates an authorizer which can handle multiple authentication @@ -63,9 +83,7 @@ func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { Path: req.URL.Path[:v2Root+4], } - pingEndpoint := ping.String() - - challenges, err := ea.challenges.GetChallenges(pingEndpoint) + challenges, err := ea.challenges.GetChallenges(ping) if err != nil { return err } @@ -101,25 +119,47 @@ type clock interface { type tokenHandler struct { header http.Header creds CredentialStore - scope tokenScope transport http.RoundTripper clock clock + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope + tokenLock sync.Mutex tokenCache string tokenExpiration time.Time } -// tokenScope represents the scope at which a token will be requested. -// This represents a specific action on a registry resource. -type tokenScope struct { - Resource string - Scope string - Actions []string +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string +} + +// RepositoryScope represents a token scope for access +// to a repository. +type RepositoryScope struct { + Repository string + Actions []string } -func (ts tokenScope) String() string { - return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ",")) +} + +// TokenHandlerOptions is used to configure a new token handler +type TokenHandlerOptions struct { + Transport http.RoundTripper + Credentials CredentialStore + + OfflineAccess bool + ForceOAuth bool + ClientID string + Scopes []Scope } // An implementation of clock for providing real time data. @@ -131,21 +171,33 @@ func (realClock) Now() time.Time { return time.Now() } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - return newTokenHandler(transport, creds, realClock{}, scope, actions...) + // Create options... + return NewTokenHandlerWithOptions(TokenHandlerOptions{ + Transport: transport, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: scope, + Actions: actions, + }, + }, + }) } -// newTokenHandler exposes the option to provide a clock to manipulate time in unit testing. 
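A minimal wiring sketch for the options-based token handler described above (NewTokenHandlerWithOptions appears further down in this diff); the repository name is hypothetical and creds stands in for a real CredentialStore:

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
)

func main() {
	// creds would be a caller-supplied CredentialStore implementing the new
	// RefreshToken/SetRefreshToken methods; nil is enough to show the wiring.
	var creds auth.CredentialStore

	handler := auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{
		Transport:   nil, // the HTTP client falls back to its default transport
		Credentials: creds,
		Scopes: []auth.Scope{
			auth.RepositoryScope{Repository: "library/alpine", Actions: []string{"pull"}},
		},
	})

	cm := auth.NewSimpleChallengeManager()
	rt := transport.NewTransport(nil, auth.NewAuthorizer(cm, handler, auth.NewBasicHandler(creds)))
	_ = &http.Client{Transport: rt}
}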
-func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { - return &tokenHandler{ - transport: transport, - creds: creds, - clock: c, - scope: tokenScope{ - Resource: "repository", - Scope: scope, - Actions: actions, - }, +// NewTokenHandlerWithOptions creates a new token handler using the provided +// options structure. +func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { + handler := &tokenHandler{ + transport: options.Transport, + creds: options.Credentials, + offlineAccess: options.OfflineAccess, + forceOAuth: options.ForceOAuth, + clientID: options.ClientID, + scopes: options.Scopes, + clock: realClock{}, } + + return handler } func (th *tokenHandler) client() *http.Client { @@ -160,71 +212,164 @@ func (th *tokenHandler) Scheme() string { } func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if err := th.refreshToken(params); err != nil { + var additionalScopes []string + if fromParam := req.URL.Query().Get("from"); fromParam != "" { + additionalScopes = append(additionalScopes, RepositoryScope{ + Repository: fromParam, + Actions: []string{"pull"}, + }.String()) + } + + token, err := th.getToken(params, additionalScopes...) + if err != nil { return err } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return nil } -func (th *tokenHandler) refreshToken(params map[string]string) error { +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { th.tokenLock.Lock() defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } + var addedScopes bool + for _, scope := range additionalScopes { + scopes = append(scopes, scope) + addedScopes = true + } + now := th.clock.Now() - if now.After(th.tokenExpiration) { - tr, err := th.fetchToken(params) + if now.After(th.tokenExpiration) || addedScopes { + token, expiration, err := th.fetchToken(params, scopes) if err != nil { - return err + return "", err } - th.tokenCache = tr.Token - th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) + + // do not update cache for added scope tokens + if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } + + return token, nil } - return nil + return th.tokenCache, nil } -type tokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` } -func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { - //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) - realm, ok := params["realm"] - if !ok { - return nil, errors.New("no realm specified for token auth challenge") - } +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) - // TODO(dmcgowan): 
Handle empty scheme + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = defaultClientID + } + form.Set("client_id", clientID) + + if refreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", time.Time{}, fmt.Errorf("no supported grant type") + } - realmURL, err := url.Parse(realm) + resp, err := th.client().PostForm(realm.String(), form) if err != nil { - return nil, fmt.Errorf("invalid token auth challenge realm: %s", err) + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) } - req, err := http.NewRequest("GET", realmURL.String(), nil) + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { + + req, err := http.NewRequest("GET", realm.String(), nil) if err != nil { - return nil, err + return "", time.Time{}, err } reqParams := req.URL.Query() - service := params["service"] - scope := th.scope.String() if service != "" { reqParams.Add("service", service) } - for _, scopeField := range strings.Fields(scope) { - reqParams.Add("scope", scopeField) + for _, scope := range scopes { + reqParams.Add("scope", scope) + } + + if th.offlineAccess { + reqParams.Add("offline_token", "true") + clientID := th.clientID + if clientID == "" { + clientID = defaultClientID + } + reqParams.Add("client_id", clientID) } if th.creds != nil { - username, password := th.creds.Basic(realmURL) + username, password := th.creds.Basic(realm) if username != "" && password != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) @@ -235,19 +380,24 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon resp, err := th.client().Do(req) if err != nil { - return nil, err + return "", time.Time{}, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { - return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + err := client.HandleErrorResponse(resp) + 
return "", time.Time{}, err } decoder := json.NewDecoder(resp.Body) - tr := new(tokenResponse) - if err = decoder.Decode(tr); err != nil { - return nil, fmt.Errorf("unable to decode token response: %s", err) + var tr getTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && th.creds != nil { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) } // `access_token` is equivalent to `token` and if both are specified @@ -258,21 +408,48 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon } if tr.Token == "" { - return nil, errors.New("authorization server did not include a token in the response") + return "", time.Time{}, ErrNoToken } if tr.ExpiresIn < minimumTokenLifetimeSeconds { - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds + logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { // issued_at is optional in the token response. - tr.IssuedAt = th.clock.Now() + tr.IssuedAt = th.clock.Now().UTC() + } + + return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil +} + +func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { + realm, ok := params["realm"] + if !ok { + return "", time.Time{}, errors.New("no realm specified for token auth challenge") + } + + // TODO(dmcgowan): Handle empty scheme and relative realm + realmURL, err := url.Parse(realm) + if err != nil { + return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) + } + + service := params["service"] + + var refreshToken string + + if th.creds != nil { + refreshToken = th.creds.RefreshToken(realmURL, service) + } + + if refreshToken != "" || th.forceOAuth { + return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) } - return tr, nil + return th.fetchTokenWithBasicAuth(realmURL, service, scopes) } type basicHandler struct { @@ -299,5 +476,5 @@ func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]st return nil } } - return errors.New("no basic auth credentials") + return ErrNoBasicAuthCredentials } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go index f1686942dd10..96c62990fb86 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/auth/session_test.go @@ -80,14 +80,25 @@ func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersio } type testCredentialStore struct { - username string - password string + username string + password string + refreshTokens map[string]string } func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { return tcs.username, tcs.password } +func (tcs *testCredentialStore) RefreshToken(u *url.URL, service string) string { + return tcs.refreshTokens[service] +} + +func (tcs *testCredentialStore) SetRefreshToken(u *url.URL, service string, token string) { + if tcs.refreshTokens != nil { + tcs.refreshTokens[service] = token + } +} + func TestEndpointAuthorizeToken(t *testing.T) { service := "localhost.localdomain" repo1 := "some/registry" @@ -162,14 +173,11 @@ 
func TestEndpointAuthorizeToken(t *testing.T) { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } - badCheck := func(a string) bool { - return a == "Bearer statictoken" - } - e2, c2 := testServerWithAuth(m, authenicate, badCheck) + e2, c2 := testServerWithAuth(m, authenicate, validCheck) defer c2() challengeManager2 := NewSimpleChallengeManager() - versions, err = ping(challengeManager2, e+"/v2/", "x-multi-api-version") + versions, err = ping(challengeManager2, e2+"/v2/", "x-multi-api-version") if err != nil { t.Fatal(err) } @@ -199,6 +207,161 @@ func TestEndpointAuthorizeToken(t *testing.T) { } } +func TestEndpointAuthorizeRefreshToken(t *testing.T) { + service := "localhost.localdomain" + repo1 := "some/registry" + repo2 := "other/registry" + scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) + scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) + refreshToken1 := "0123456790abcdef" + refreshToken2 := "0123456790fedcba" + tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope1), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken1)), + }, + }, + { + // In the future this test may fail and require using basic auth to get a different refresh token + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken1, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(fmt.Sprintf(`{"access_token":"statictoken","refresh_token":"%s"}`, refreshToken2)), + }, + }, + { + Request: testutil.Request{ + Method: "POST", + Route: "/token", + Body: []byte(fmt.Sprintf("client_id=registry-client&grant_type=refresh_token&refresh_token=%s&scope=%s&service=%s", refreshToken2, url.QueryEscape(scope2), service)), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: []byte(`{"access_token":"badtoken","refresh_token":"%s"}`), + }, + }, + }) + te, tc := testServer(tokenMap) + defer tc() + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/hello", + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + }, + }, + }) + + authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) + validCheck := func(a string) bool { + return a == "Bearer statictoken" + } + e, c := testServerWithAuth(m, authenicate, validCheck) + defer c() + + challengeManager1 := NewSimpleChallengeManager() + versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + creds := &testCredentialStore{ + refreshTokens: map[string]string{ + service: refreshToken1, + }, + } + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, creds, repo1, "pull", "push"))) + client := 
&http.Client{Transport: transport1} + + req, _ := http.NewRequest("GET", e+"/v2/hello", nil) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) + } + + // Try with refresh token setting + e2, c2 := testServerWithAuth(m, authenicate, validCheck) + defer c2() + + challengeManager2 := NewSimpleChallengeManager() + versions, err = ping(challengeManager2, e2+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if len(versions) != 1 { + t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client2 := &http.Client{Transport: transport2} + + req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) + resp, err = client2.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusAccepted { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } + + if creds.refreshTokens[service] != refreshToken2 { + t.Fatalf("Refresh token not set after change") + } + + // Try with bad token + e3, c3 := testServerWithAuth(m, authenicate, validCheck) + defer c3() + + challengeManager3 := NewSimpleChallengeManager() + versions, err = ping(challengeManager3, e3+"/v2/", "x-api-version") + if err != nil { + t.Fatal(err) + } + if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { + t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) + } + + transport3 := transport.NewTransport(nil, NewAuthorizer(challengeManager3, NewTokenHandler(nil, creds, repo2, "pull", "push"))) + client3 := &http.Client{Transport: transport3} + + req, _ = http.NewRequest("GET", e3+"/v2/hello", nil) + resp, err = client3.Do(req) + if err != nil { + t.Fatalf("Error sending get request: %s", err) + } + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) + } +} + func basicAuth(username, password string) string { auth := username + ":" + password return base64.StdEncoding.EncodeToString([]byte(auth)) @@ -379,7 +542,19 @@ func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { t.Fatal(err) } clock := &fakeClock{current: time.Now()} - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) client := &http.Client{Transport: transport1} // First call should result in a token exchange @@ -517,7 +692,20 @@ func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { if err != nil { t.Fatal(err) } - transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, 
"pull", "push"), NewBasicHandler(creds))) + + options := TokenHandlerOptions{ + Transport: nil, + Credentials: creds, + Scopes: []Scope{ + RepositoryScope{ + Repository: repo, + Actions: []string{"pull", "push"}, + }, + }, + } + tHandler := NewTokenHandlerWithOptions(options) + tHandler.(*tokenHandler).clock = clock + transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, tHandler, NewBasicHandler(creds))) client := &http.Client{Transport: transport1} // First call should result in a token exchange diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go index c7eee4e8c68a..e3ffcb00fd60 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/blob_writer.go @@ -6,7 +6,6 @@ import ( "io" "io/ioutil" "net/http" - "os" "time" "github.com/docker/distribution" @@ -33,7 +32,7 @@ func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUploadUnknown } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { @@ -104,21 +103,8 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { } -func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { - newOffset := hbu.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset += int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - hbu.offset = newOffset - - return hbu.offset, nil +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset } func (hbu *httpBlobUpload) ID() string { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go index 7305c021cf56..adbaacf4bba9 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors.go @@ -2,6 +2,7 @@ package client import ( "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -10,6 +11,10 @@ import ( "github.com/docker/distribution/registry/api/errcode" ) +// ErrNoErrorsInBody is returned when a HTTP response body parses to an empty +// errcode.Errors slice. +var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") + // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. type UnexpectedHTTPStatusError struct { @@ -17,46 +22,83 @@ type UnexpectedHTTPStatusError struct { } func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) } // UnexpectedHTTPResponseError is returned when an expected HTTP status code // is returned, but the content was unexpected and failed to be parsed. 
type UnexpectedHTTPResponseError struct { - ParseErr error - Response []byte + ParseErr error + StatusCode int + Response []byte } func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) + return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) } -func parseHTTPErrorResponse(r io.Reader) error { +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { var errors errcode.Errors body, err := ioutil.ReadAll(r) if err != nil { return err } + // For backward compatibility, handle irregularly formatted + // messages that contain a "details" field. + var detailsErr struct { + Details string `json:"details"` + } + err = json.Unmarshal(body, &detailsErr) + if err == nil && detailsErr.Details != "" { + switch statusCode { + case http.StatusUnauthorized: + return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + // FIXME: go1.5 doesn't export http.StatusTooManyRequests while + // go1.6 does. Update the hardcoded value to the constant once + // Docker updates golang version to 1.6. + case 429: + return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) + default: + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) + } + } + if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ - ParseErr: err, - Response: body, + ParseErr: err, + StatusCode: statusCode, + Response: body, } } + + if len(errors) == 0 { + // If there was no error specified in the body, return + // UnexpectedHTTPResponseError. + return &UnexpectedHTTPResponseError{ + ParseErr: ErrNoErrorsInBody, + StatusCode: statusCode, + Response: body, + } + } + return errors } -func handleErrorResponse(resp *http.Response) error { +// HandleErrorResponse returns error parsed from HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError returned for response code outside of expected +// range. 
+func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { - err := parseHTTPErrorResponse(resp.Body) + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp.Body) + return parseHTTPErrorResponse(resp.StatusCode, resp.Body) } return &UnexpectedHTTPStatusError{Status: resp.Status} } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors_test.go new file mode 100644 index 000000000000..ca9dddd10191 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/errors_test.go @@ -0,0 +1,104 @@ +package client + +import ( + "bytes" + "io" + "net/http" + "strings" + "testing" +) + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +func TestHandleErrorResponse401ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: action requires authentication" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponse401WithInvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: authentication required" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}" + response := &http.Response{ + Status: "400 Bad Request", + StatusCode: 400, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "digest invalid: provided digest does not match" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404EmptyErrorSlice(t *testing.T) { + json := `{"randomkey": "randomvalue"}` + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := `error parsing HTTP 404 response body: no error details found in HTTP response body: "{\"randomkey\": \"randomvalue\"}"` + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "error parsing HTTP 404 response body: invalid character 'i' looking for beginning of object key string: 
\"{invalid json}\"" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { + response := &http.Response{ + Status: "501 Not Implemented", + StatusCode: 501, + Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")}, + } + err := HandleErrorResponse(response) + + expectedMsg := "received unexpected HTTP status: 501 Not Implemented" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go index 1c06e5a07eb4..8cc5f7f9aa03 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository.go @@ -3,6 +3,7 @@ package client import ( "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -14,7 +15,6 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" @@ -27,16 +27,50 @@ type Registry interface { Repositories(ctx context.Context, repos []string, last string) (n int, err error) } +// checkHTTPRedirect is a callback that can manipulate redirected HTTP +// requests. It is used to preserve Accept and Range headers. +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. + hasValue := false + for _, existingVal := range req.Header[headerName] { + if existingVal == val { + hasValue = true + break + } + } + if !hasValue { + req.Header.Add(headerName, val) + } + } + } + } + + return nil +} + // NewRegistry creates a registry namespace which can be used to get a listing of repositories func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL) + ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err } client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, + Transport: transport, + Timeout: 1 * time.Minute, + CheckRedirect: checkHTTPRedirect, } return ®istry{ @@ -91,25 +125,22 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri returnErr = io.EOF } } else { - return 0, handleErrorResponse(resp) + return 0, HandleErrorResponse(resp) } return numFilled, returnErr } // NewRepository creates a new Repository for the given repository name and base URL. 
-func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - if _, err := reference.ParseNamed(name); err != nil { - return nil, err - } - - ub, err := v2.NewURLBuilderFromString(baseURL) +func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err } client := &http.Client{ - Transport: transport, + Transport: transport, + CheckRedirect: checkHTTPRedirect, // TODO(dmcgowan): create cookie jar } @@ -125,21 +156,21 @@ type repository struct { client *http.Client ub *v2.URLBuilder context context.Context - name string + name reference.Named } -func (r *repository) Name() string { +func (r *repository) Named() reference.Named { return r.name } func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { statter := &blobStatter{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, } return &blobs{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), @@ -149,89 +180,185 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { // todo(richardscothern): options should be sent over the wire return &manifests{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, etags: make(map[string]string), }, nil } -func (r *repository) Signatures() distribution.SignatureService { - ms, _ := r.Manifests(r.context) - return &signatures{ - manifests: ms, +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + context: r.context, + name: r.Named(), } } -type signatures struct { - manifests distribution.ManifestService +// tags implements remote tagging operations. 
+type tags struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named } -func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) { - m, err := s.manifests.Get(dgst) +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + u, err := t.ub.BuildTagsURL(t.name) if err != nil { - return nil, err + return tags, err } - return m.Signatures() -} -func (s *signatures) Enumerate(dgst digest.Digest) ([]digest.Digest, error) { - return nil, distribution.ErrUnsupported -} + resp, err := t.client.Get(u) + if err != nil { + return tags, err + } + defer resp.Body.Close() -func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { - panic("not implemented") -} + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } -func (s *signatures) Delete(revision digest.Digest) error { - return distribution.ErrUnsupported + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = tagsResponse.Tags + return tags, nil + } + return tags, HandleErrorResponse(resp) } -type manifests struct { - name string - ub *v2.URLBuilder - client *http.Client - etags map[string]string +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.ParseDigest(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + } -func (ms *manifests) Tags() ([]string, error) { - u, err := ms.ub.BuildTagsURL(ms.name) +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. 
+func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + ref, err := reference.WithTag(t.name, tag) if err != nil { - return nil, err + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err } - resp, err := ms.client.Get(u) + req, err := http.NewRequest("HEAD", u, nil) if err != nil { - return nil, err + return distribution.Descriptor{}, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + var attempts int + resp, err := t.client.Do(req) +check: + if err != nil { + return distribution.Descriptor{}, err } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + return descriptorFromResponse(resp) + case resp.StatusCode == http.StatusMethodNotAllowed: + req, err = http.NewRequest("GET", u, nil) if err != nil { - return nil, err + return distribution.Descriptor{}, err } - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return nil, err + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) } - return tagsResponse.Tags, nil + resp, err = t.client.Do(req) + attempts++ + if attempts > 1 { + return distribution.Descriptor{}, err + } + goto check + default: + return distribution.Descriptor{}, HandleErrorResponse(resp) } - return nil, handleErrorResponse(resp) } -func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { - // Call by Tag endpoint since the API uses the same - // URL endpoint for tags and digests. - return ms.ExistsByTag(dgst.String()) +func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + panic("not implemented") +} + +func (t *tags) Untag(ctx context.Context, tag string) error { + panic("not implemented") +} + +type manifests struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + etags map[string]string } -func (ms *manifests) ExistsByTag(tag string) (bool, error) { - u, err := ms.ub.BuildManifestURL(ms.name, tag) +func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return false, err + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return false, err } @@ -246,49 +373,75 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { } else if resp.StatusCode == http.StatusNotFound { return false, nil } - return false, handleErrorResponse(resp) -} - -func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { - // Call by Tag endpoint since the API uses the same - // URL endpoint for tags and digests. - return ms.GetByTag(dgst.String()) + return false, HandleErrorResponse(resp) } -// AddEtagToTag allows a client to supply an eTag to GetByTag which will be +// AddEtagToTag allows a client to supply an eTag to Get which will be // used for a conditional HTTP request. If the eTag matches, a nil manifest -// and nil error will be returned. etag is automatically quoted when added to -// this map. +// and ErrManifestNotModified error will be returned. etag is automatically +// quoted when added to this map. 
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return func(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[tag] = fmt.Sprintf(`"%s"`, etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") + return etagOption{tag, etag} +} + +type etagOption struct{ tag, etag string } + +func (o etagOption) Apply(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) + return nil } + return fmt.Errorf("etag options is a client-only option") } -func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + ) + for _, option := range options { - err := option(ms) + if opt, ok := option.(distribution.WithTagOption); ok { + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + } else { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) if err != nil { return nil, err } } - u, err := ms.ub.BuildManifestURL(ms.name, tag) + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return nil, err } + req, err := http.NewRequest("GET", u, nil) if err != nil { return nil, err } - if _, ok := ms.etags[tag]; ok { - req.Header.Set("If-None-Match", ms.etags[tag]) + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) } + resp, err := ms.client.Do(req) if err != nil { return nil, err @@ -297,49 +450,96 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic if resp.StatusCode == http.StatusNotModified { return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { - var sm schema1.SignedManifest - decoder := json.NewDecoder(resp.Body) + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) - if err := decoder.Decode(&sm); err != nil { + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { return nil, err } - return &sm, nil + return m, nil } - return nil, handleErrorResponse(resp) + return nil, HandleErrorResponse(resp) } -func (ms *manifests) Enumerate() ([]digest.Digest, error) { - return nil, distribution.ErrUnsupported -} +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. 
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + var tagged bool -func (ms *manifests) Put(m *schema1.SignedManifest) error { - manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + mediaType, p, err := m.Payload() if err != nil { - return err + return "", err } - // todo(richardscothern): do something with options here when they become applicable + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw)) + manifestURL, err := ms.ub.BuildManifestURL(ref) if err != nil { - return err + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err } + putRequest.Header.Set("Content-Type", mediaType) + resp, err := ms.client.Do(putRequest) if err != nil { - return err + return "", err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { - // TODO(dmcgowan): make use of digest header - return nil + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.ParseDigest(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil } - return handleErrorResponse(resp) + + return "", HandleErrorResponse(resp) } -func (ms *manifests) Delete(dgst digest.Digest) error { - u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return err } @@ -357,11 +557,16 @@ func (ms *manifests) Delete(dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + type blobs struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client @@ -389,11 +594,7 @@ func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Des } func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - desc, err := bs.Stat(ctx, dgst) - if err != nil { - return nil, err - } - reader, err := bs.Open(ctx, desc.Digest) + reader, err := bs.Open(ctx, dgst) if err != nil { return nil, err } @@ -403,7 +604,11 @@ func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { } func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return nil, err } @@ -413,7 +618,7 @@ func (bs *blobs) 
Open(ctx context.Context, dgst digest.Digest) (distribution.Rea if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUnknown } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) }), nil } @@ -444,8 +649,57 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut return writer.Commit(ctx, desc) } -func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { - u, err := bs.ub.BuildBlobUploadURL(bs.name) +// createOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type createOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*createOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts createOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
+ if err != nil { + return nil, err + } resp, err := bs.client.Post(u, "", nil) if err != nil { @@ -453,7 +707,14 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") location, err := sanitizeLocation(resp.Header.Get("Location"), u) @@ -468,8 +729,9 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { startedAt: time.Now(), location: location, }, nil + default: + return nil, HandleErrorResponse(resp) } - return nil, handleErrorResponse(resp) } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -480,18 +742,18 @@ func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { return bs.statter.Clear(ctx, dgst) } -func (bs *blobs) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { - return distribution.ErrUnsupported -} - type blobStatter struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client } func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - u, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) if err != nil { return distribution.Descriptor{}, err } @@ -521,7 +783,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi } else if resp.StatusCode == http.StatusNotFound { return distribution.Descriptor{}, distribution.ErrBlobUnknown } - return distribution.Descriptor{}, handleErrorResponse(resp) + return distribution.Descriptor{}, HandleErrorResponse(resp) } func buildCatalogValues(maxEntries int, last string) url.Values { @@ -539,7 +801,11 @@ func buildCatalogValues(maxEntries int, last string) url.Values { } func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return err } @@ -558,7 +824,7 @@ func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go index 058947de635e..2faeb2768a58 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/repository_test.go @@ -18,6 +18,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" 
"github.com/docker/distribution/testutil" "github.com/docker/distribution/uuid" @@ -38,16 +39,10 @@ func newRandomBlob(size int) (digest.Digest, []byte) { panic("unable to read enough bytes") } - dgst, err := digest.FromBytes(b) - if err != nil { - panic(err) - } - - return dgst, b + return digest.FromBytes(b), b } func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { - *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", @@ -103,11 +98,11 @@ func addTestCatalog(route string, content []byte, link string, m *testutil.Reque func TestBlobDelete(t *testing.T) { dgst, _ := newRandomBlob(1024) var m testutil.RequestResponseMap - repo := "test.example.com/repo1" + repo, _ := reference.ParseNamed("test.example.com/repo1") m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -142,7 +137,8 @@ func TestBlobFetch(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } @@ -162,12 +158,12 @@ func TestBlobFetch(t *testing.T) { func TestBlobExistsNoContentLength(t *testing.T) { var m testutil.RequestResponseMap - repo := "biff" + repo, _ := reference.ParseNamed("biff") dgst, content := newRandomBlob(1024) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -182,7 +178,7 @@ func TestBlobExistsNoContentLength(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -221,7 +217,8 @@ func TestBlobExists(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } @@ -252,18 +249,18 @@ func TestBlobUploadChunked(t *testing.T) { b1[512:513], b1[513:1024], } - repo := "test.example.com/uploadrepo" + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uuids := []string{uuid.Generate().String()} m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", - Route: "/v2/" + repo + "/blobs/uploads/", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[0]}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]}, "Docker-Upload-UUID": {uuids[0]}, "Range": {"0-0"}, }), @@ -276,14 +273,14 @@ func TestBlobUploadChunked(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PATCH", - Route: "/v2/" + repo + "/blobs/uploads/" + uuids[i], + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i], Body: chunk, }, Response: 
testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[i+1]}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i+1]}, "Docker-Upload-UUID": {uuids[i+1]}, "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, }), @@ -294,7 +291,7 @@ func TestBlobUploadChunked(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/blobs/uploads/" + uuids[len(uuids)-1], + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], QueryParams: map[string][]string{ "digest": {dgst.String()}, }, @@ -311,7 +308,7 @@ func TestBlobUploadChunked(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -367,18 +364,18 @@ func TestBlobUploadChunked(t *testing.T) { func TestBlobUploadMonolithic(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap - repo := "test.example.com/uploadrepo" + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uploadID := uuid.Generate().String() m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", - Route: "/v2/" + repo + "/blobs/uploads/", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, "Docker-Upload-UUID": {uploadID}, "Range": {"0-0"}, }), @@ -387,13 +384,13 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PATCH", - Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, Body: b1, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ - "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, "Docker-Upload-UUID": {uploadID}, "Content-Length": {"0"}, "Docker-Content-Digest": {dgst.String()}, @@ -404,7 +401,7 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, QueryParams: map[string][]string{ "digest": {dgst.String()}, }, @@ -421,7 +418,7 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -472,7 +469,72 @@ func TestBlobUploadMonolithic(t *testing.T) { } } -func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { +func TestBlobMount(t *testing.T) { + dgst, content := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") + + sourceRepo, _ := 
reference.ParseNamed("test.example.com/sourcerepo") + canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + l := r.Blobs(ctx) + + bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatalf("Expected blob writer to be nil, was %v", bw) + } + + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if ebm.From.Digest() != dgst { + t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) + } + if ebm.From.Name() != sourceRepo.Name() { + t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) + } + } else { + t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) + } +} + +func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { blobs := make([]schema1.FSLayer, blobCount) history := make([]schema1.History, blobCount) @@ -484,7 +546,7 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed } m := schema1.Manifest{ - Name: name, + Name: name.String(), Tag: tag, Architecture: "x86", FSLayers: blobs, @@ -504,24 +566,14 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed panic(err) } - p, err := sm.Payload() - if err != nil { - panic(err) - } - - dgst, err := digest.FromBytes(p) - if err != nil { - panic(err) - } - - return sm, dgst, p + return sm, digest.FromBytes(sm.Canonical), sm.Canonical } -func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { - actualDigest, _ := digest.FromBytes(content) +func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { + actualDigest := digest.FromBytes(content) getReqWithEtag := testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, Headers: http.Header(map[string][]string{ "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, }), @@ -535,6 +587,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, }), } } else { @@ -544,6 +597,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Headers: http.Header(map[string][]string{ "Content-Length": 
{fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, }), } @@ -551,11 +605,11 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) } -func addTestManifest(repo, reference string, content []byte, m *testutil.RequestResponseMap) { +func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -563,19 +617,21 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, }), }, }) *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, }), }, }) @@ -608,12 +664,18 @@ func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { return nil } -func TestManifestFetch(t *testing.T) { +func TestV1ManifestFetch(t *testing.T) { ctx := context.Background() - repo := "test.example.com/repo" + repo, _ := reference.ParseNamed("test.example.com/repo") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addTestManifest(repo, dgst.String(), m1.Raw, &m) + _, pl, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "badcontenttype", "text/html", pl, &m) e, c := testServer(m) defer c() @@ -627,7 +689,7 @@ func TestManifestFetch(t *testing.T) { t.Fatal(err) } - ok, err := ms.Exists(dgst) + ok, err := ms.Exists(ctx, dgst) if err != nil { t.Fatal(err) } @@ -635,17 +697,48 @@ func TestManifestFetch(t *testing.T) { t.Fatal("Manifest does not exist") } - manifest, err := ms.Get(dgst) + manifest, err := ms.Get(ctx, dgst) + if err != nil { + t.Fatal(err) + } + v1manifest, ok := manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err := checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("latest")) if err != nil { t.Fatal(err) } - if err := checkEqualManifest(manifest, m1); err != nil { + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + manifest, err = ms.Get(ctx, dgst, distribution.WithTag("badcontenttype")) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest 
type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { t.Fatal(err) } } func TestManifestFetchWithEtag(t *testing.T) { - repo := "test.example.com/repo/by/tag" + repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) @@ -653,31 +746,36 @@ func TestManifestFetchWithEtag(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } - ctx := context.Background() + ms, err := r.Manifests(ctx) if err != nil { t.Fatal(err) } - _, err = ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) + clientManifestService, ok := ms.(*manifests) + if !ok { + panic("wrong type for client manifest service") + } + _, err = clientManifestService.Get(ctx, d1, distribution.WithTag("latest"), AddEtagToTag("latest", d1.String())) if err != distribution.ErrManifestNotModified { t.Fatal(err) } } func TestManifestDelete(t *testing.T) { - repo := "test.example.com/repo/delete" + repo, _ := reference.ParseNamed("test.example.com/repo/delete") _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", - Route: "/v2/" + repo + "/manifests/" + dgst1.String(), + Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -700,24 +798,30 @@ func TestManifestDelete(t *testing.T) { t.Fatal(err) } - if err := ms.Delete(dgst1); err != nil { + if err := ms.Delete(ctx, dgst1); err != nil { t.Fatal(err) } - if err := ms.Delete(dgst2); err == nil { + if err := ms.Delete(ctx, dgst2); err == nil { t.Fatal("Expected error deleting unknown manifest") } // TODO(dmcgowan): Check for specific unknown error } func TestManifestPut(t *testing.T) { - repo := "test.example.com/repo/delete" + repo, _ := reference.ParseNamed("test.example.com/repo/delete") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) + + _, payload, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/manifests/other", - Body: m1.Raw, + Route: "/v2/" + repo.Name() + "/manifests/other", + Body: payload, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -728,6 +832,22 @@ func TestManifestPut(t *testing.T) { }, }) + putDgst := digest.FromBytes(m1.Canonical) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "PUT", + Route: "/v2/" + repo.Name() + "/manifests/" + putDgst.String(), + Body: payload, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Docker-Content-Digest": {putDgst.String()}, + }), + }, + }) + e, c := testServer(m) defer c() @@ -741,7 +861,11 @@ func TestManifestPut(t *testing.T) { t.Fatal(err) } - if err := ms.Put(m1); err != nil { + if _, err := ms.Put(ctx, m1, distribution.WithTag(m1.Tag)); err != nil { + t.Fatal(err) + } + + if _, err := ms.Put(ctx, m1); err != nil { t.Fatal(err) } @@ -749,7 +873,7 @@ func TestManifestPut(t *testing.T) { } func 
TestManifestTags(t *testing.T) { - repo := "test.example.com/repo/tags/list" + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") tagsList := []byte(strings.TrimSpace(` { "name": "test.example.com/repo/tags/list", @@ -761,21 +885,22 @@ func TestManifestTags(t *testing.T) { } `)) var m testutil.RequestResponseMap - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo + "/tags/list", - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: tagsList, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(tagsList))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - + for i := 0; i < 3; i++ { + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: tagsList, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(tagsList))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + } e, c := testServer(m) defer c() @@ -783,34 +908,41 @@ func TestManifestTags(t *testing.T) { if err != nil { t.Fatal(err) } + ctx := context.Background() - ms, err := r.Manifests(ctx) - if err != nil { - t.Fatal(err) - } + tagService := r.Tags(ctx) - tags, err := ms.Tags() + tags, err := tagService.All(ctx) if err != nil { t.Fatal(err) } - if len(tags) != 3 { t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) } - // TODO(dmcgowan): Check array + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } // TODO(dmcgowan): Check for error cases } func TestManifestUnauthorized(t *testing.T) { - repo := "test.example.com/repo" + repo, _ := reference.ParseNamed("test.example.com/repo") _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusUnauthorized, @@ -831,7 +963,7 @@ func TestManifestUnauthorized(t *testing.T) { t.Fatal(err) } - _, err = ms.Get(dgst) + _, err = ms.Get(ctx, dgst) if err == nil { t.Fatal("Expected error fetching manifest") } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go index b27b6c2379b5..e1b17a03a08d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -1,12 +1,22 @@ package transport import ( - "bufio" "errors" "fmt" "io" "net/http" "os" + "regexp" + "strconv" +) + +var ( + contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) + + // ErrWrongCodeForByteRange is returned if the client sends a request + // with a Range header but the server returns a 2xx or 3xx code other + // than 206 Partial Content. 
+ ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") ) // ReadSeekCloser combines io.ReadSeeker with io.Closer. @@ -40,8 +50,6 @@ type httpReadSeeker struct { // rc is the remote read closer. rc io.ReadCloser - // brd is a buffer for internal buffered io. - brd *bufio.Reader // readerOffset tracks the offset as of the last read. readerOffset int64 // seekOffset allows Seek to override the offset. Seek changes @@ -58,7 +66,7 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { return 0, hrs.err } - // If we seeked to a different position, we need to reset the + // If we sought to a different position, we need to reset the // connection. This logic is here instead of Seek so that if // a seek is undone before the next read, the connection doesn't // need to be closed and reopened. A common example of this is @@ -79,11 +87,6 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { hrs.seekOffset += int64(n) hrs.readerOffset += int64(n) - // Simulate io.EOF error if we reach filesize. - if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size { - err = io.EOF - } - return n, err } @@ -92,8 +95,18 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { return 0, hrs.err } + lastReaderOffset := hrs.readerOffset + + if whence == os.SEEK_SET && hrs.rc == nil { + // If no request has been made yet, and we are seeking to an + // absolute position, set the read offset as well to avoid an + // unnecessary request. + hrs.readerOffset = offset + } + _, err := hrs.reader() if err != nil { + hrs.readerOffset = lastReaderOffset return 0, err } @@ -101,14 +114,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { case os.SEEK_CUR: - newOffset += int64(offset) + newOffset += offset case os.SEEK_END: if hrs.size < 0 { return 0, errors.New("content length not known") } - newOffset = hrs.size + int64(offset) + newOffset = hrs.size + offset case os.SEEK_SET: - newOffset = int64(offset) + newOffset = offset } if newOffset < 0 { @@ -131,7 +144,6 @@ func (hrs *httpReadSeeker) Close() error { } hrs.rc = nil - hrs.brd = nil hrs.err = errors.New("httpLayer: closed") @@ -154,7 +166,7 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { } if hrs.rc != nil { - return hrs.brd, nil + return hrs.rc, nil } req, err := http.NewRequest("GET", hrs.url, nil) @@ -163,10 +175,8 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { } if hrs.readerOffset > 0 { - // TODO(stevvooe): Get this working correctly. - // If we are at different offset, issue a range request from there. 
- req.Header.Add("Range", "1-") + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) // TODO: get context in here // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) } @@ -179,12 +189,55 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // Normally would use client.SuccessStatus, but that would be a cyclic // import if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - hrs.rc = resp.Body - if resp.StatusCode == http.StatusOK { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { hrs.size = resp.ContentLength } else { hrs.size = -1 } + hrs.rc = resp.Body } else { defer resp.Body.Close() if hrs.errorHandler != nil { @@ -193,11 +246,5 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } - if hrs.brd == nil { - hrs.brd = bufio.NewReader(hrs.rc) - } else { - hrs.brd.Reset(hrs.rc) - } - - return hrs.brd, nil + return hrs.rc, nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go deleted file mode 100644 index a1ba7f3ab02e..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package registry provides the main entrypoints for running a registry. 
-package registry diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go index 7f52d13d75d0..523ecca28494 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/api_test.go @@ -18,11 +18,15 @@ import ( "strings" "testing" + "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -39,7 +43,6 @@ var headerConfig = http.Header{ // 200 OK response. func TestCheckAPI(t *testing.T) { env := newTestEnv(t, false) - baseURL, err := env.builder.BuildBaseURL() if err != nil { t.Fatalf("unexpected error building base url: %v", err) @@ -248,26 +251,22 @@ func TestURLPrefix(t *testing.T) { } type blobArgs struct { - imageName string + imageName reference.Named layerFile io.ReadSeeker layerDigest digest.Digest - tarSumStr string } func makeBlobArgs(t *testing.T) blobArgs { - layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } - layerDigest := digest.Digest(tarSumStr) - args := blobArgs{ - imageName: "foo/bar", layerFile: layerFile, layerDigest: layerDigest, - tarSumStr: tarSumStr, } + args.imageName, _ = reference.ParseNamed("foo/bar") return args } @@ -294,6 +293,79 @@ func TestBlobDelete(t *testing.T) { testBlobDelete(t, env, args) } +func TestRelativeURL(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + config.HTTP.Headers = headerConfig + config.HTTP.RelativeURLs = false + env := newTestEnvWithConfig(t, &config) + ref, _ := reference.WithName("foo/bar") + uploadURLBaseAbs, _ := startPushLayer(t, env, ref) + + u, err := url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + args := makeBlobArgs(t) + resp, err := doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } + + config.HTTP.RelativeURLs = true + args = makeBlobArgs(t) + uploadURLBaseRelative, _ := startPushLayer(t, env, ref) + u, err = url.Parse(uploadURLBaseRelative) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Absolute URL returned from blob upload chunk with relative configuration") + } + + // Start a new upload in absolute mode to get a valid base URL + config.HTTP.RelativeURLs = false + uploadURLBaseAbs, _ = startPushLayer(t, 
env, ref) + u, err = url.Parse(uploadURLBaseAbs) + if err != nil { + t.Fatal(err) + } + if !u.IsAbs() { + t.Fatal("Relative URL returned from blob upload chunk with non-relative configuration") + } + + // Complete upload with relative URLs enabled to ensure the final location is relative + config.HTTP.RelativeURLs = true + resp, err = doPushLayer(t, env.builder, ref, args.layerDigest, uploadURLBaseAbs, args.layerFile) + if err != nil { + t.Fatalf("unexpected error doing layer push relative url: %v", err) + } + + checkResponse(t, "relativeurl blob upload", resp, http.StatusCreated) + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { + t.Fatal(err) + } + if u.IsAbs() { + t.Fatal("Relative URL returned from blob upload with non-relative configuration") + } +} + func TestBlobDeleteDisabled(t *testing.T) { deleteEnabled := false env := newTestEnv(t, deleteEnabled) @@ -301,7 +373,8 @@ func TestBlobDeleteDisabled(t *testing.T) { imageName := args.imageName layerDigest := args.layerDigest - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("error building url: %v", err) } @@ -324,7 +397,8 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ----------------------------------- // Test fetch for non-existent content - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("error building url: %v", err) } @@ -347,7 +421,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ------------------------------------------ // Start an upload, check the status then cancel - uploadURLBase, uploadUUID := startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID := startPushLayer(t, env, imageName) // A status check should work resp, err = http.Get(uploadURLBase) @@ -382,7 +456,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ----------------------------------------- // Do layer push with an empty body and different digest - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) if err != nil { t.Fatalf("unexpected error doing bad layer push: %v", err) @@ -393,12 +467,12 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ----------------------------------------- // Do layer push with an empty body and correct digest - zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{})) + zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{})) if err != nil { t.Fatalf("unexpected error digesting empty buffer: %v", err) } - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) // ----------------------------------------- @@ -406,12 +480,12 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // This is a valid but empty tarfile! 
emptyTar := bytes.Repeat([]byte("\x00"), 1024) - emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar)) + emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar)) if err != nil { t.Fatalf("unexpected error digesting empty tar: %v", err) } - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) // ------------------------------------------ @@ -419,7 +493,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { layerLength, _ := layerFile.Seek(0, os.SEEK_END) layerFile.Seek(0, os.SEEK_SET) - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) // ------------------------------------------ @@ -433,7 +507,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { canonicalDigest := canonicalDigester.Digest() layerFile.Seek(0, 0) - uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) + uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) finishUpload(t, env.builder, imageName, uploadURLBase, dgst) @@ -476,7 +550,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ---------------- // Fetch the layer with an invalid digest - badURL := strings.Replace(layerURL, "tarsum", "trsum", 1) + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) resp, err = http.Get(badURL) if err != nil { t.Fatalf("unexpected error fetching layer: %v", err) @@ -523,7 +597,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) // Missing tests: - // - Upload the same tarsum file under and different repository and + // - Upload the same tar file under and different repository and // ensure the content remains uncorrupted. 
return env } @@ -534,7 +608,8 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { layerFile := args.layerFile layerDigest := args.layerDigest - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf(err.Error()) } @@ -570,7 +645,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { // ---------------- // Attempt to delete a layer with an invalid digest - badURL := strings.Replace(layerURL, "tarsum", "trsum", 1) + badURL := strings.Replace(layerURL, "sha256", "sha257", 1) resp, err = httpDelete(badURL) if err != nil { t.Fatalf("unexpected error fetching layer: %v", err) @@ -582,7 +657,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { // Reupload previously deleted blob layerFile.Seek(0, os.SEEK_SET) - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) layerFile.Seek(0, os.SEEK_SET) @@ -610,19 +685,19 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { func TestDeleteDisabled(t *testing.T) { env := newTestEnv(t, false) - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") // "build" our layer file - layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } - layerDigest := digest.Digest(tarSumStr) - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("Error building blob URL") } - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) resp, err := httpDelete(layerURL) @@ -636,19 +711,19 @@ func TestDeleteDisabled(t *testing.T) { func TestDeleteReadOnly(t *testing.T) { env := newTestEnv(t, true) - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") // "build" our layer file - layerFile, tarSumStr, err := testutil.CreateRandomTarFile() + layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } - layerDigest := digest.Digest(tarSumStr) - layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) + ref, _ := reference.WithDigest(imageName, layerDigest) + layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("Error building blob URL") } - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) env.app.readOnly = true @@ -665,7 +740,7 @@ func TestStartPushReadOnly(t *testing.T) { env := newTestEnv(t, true) env.app.readOnly = true - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) if err != nil { @@ -696,49 +771,51 @@ func httpDelete(url string) (*http.Response, error) { } type manifestArgs struct { - imageName string - signedManifest *schema1.SignedManifest - dgst digest.Digest -} - -func makeManifestArgs(t *testing.T) manifestArgs { - args := manifestArgs{ - imageName: 
"foo/bar", - } - - return args + imageName reference.Named + mediaType string + manifest distribution.Manifest + dgst digest.Digest } func TestManifestAPI(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") + deleteEnabled := false env := newTestEnv(t, deleteEnabled) - args := makeManifestArgs(t) - testManifestAPI(t, env, args) + testManifestAPISchema1(t, env, schema1Repo) + schema2Args := testManifestAPISchema2(t, env, schema2Repo) + testManifestAPIManifestList(t, env, schema2Args) deleteEnabled = true env = newTestEnv(t, deleteEnabled) - args = makeManifestArgs(t) - testManifestAPI(t, env, args) + testManifestAPISchema1(t, env, schema1Repo) + schema2Args = testManifestAPISchema2(t, env, schema2Repo) + testManifestAPIManifestList(t, env, schema2Args) } func TestManifestDelete(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") + schema2Repo, _ := reference.ParseNamed("foo/schema2") + deleteEnabled := true env := newTestEnv(t, deleteEnabled) - args := makeManifestArgs(t) - env, args = testManifestAPI(t, env, args) - testManifestDelete(t, env, args) + schema1Args := testManifestAPISchema1(t, env, schema1Repo) + testManifestDelete(t, env, schema1Args) + schema2Args := testManifestAPISchema2(t, env, schema2Repo) + testManifestDelete(t, env, schema2Args) } func TestManifestDeleteDisabled(t *testing.T) { + schema1Repo, _ := reference.ParseNamed("foo/schema1") deleteEnabled := false env := newTestEnv(t, deleteEnabled) - args := makeManifestArgs(t) - testManifestDeleteDisabled(t, env, args) + testManifestDeleteDisabled(t, env, schema1Repo) } -func testManifestDeleteDisabled(t *testing.T, env *testEnv, args manifestArgs) *testEnv { - imageName := args.imageName - manifestURL, err := env.builder.BuildManifestURL(imageName, digest.DigestSha256EmptyTar) +func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) + manifestURL, err := env.builder.BuildManifestURL(ref) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } @@ -750,14 +827,14 @@ func testManifestDeleteDisabled(t *testing.T, env *testEnv, args manifestArgs) * defer resp.Body.Close() checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) - return nil } -func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, manifestArgs) { - imageName := args.imageName +func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { tag := "thetag" + args := manifestArgs{imageName: imageName} - manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } @@ -794,7 +871,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: imageName, + Name: imageName.Name(), Tag: tag, FSLayers: []schema1.FSLayer{ { @@ -814,10 +891,10 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m }, } - resp = putManifest(t, "putting unsigned manifest", manifestURL, unsignedManifest) + resp = putManifest(t, "putting unsigned manifest", manifestURL, "", unsignedManifest) defer resp.Body.Close() checkResponse(t, "putting unsigned manifest", resp, 
http.StatusBadRequest) - _, p, counts := checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestInvalid) + _, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp, v2.ErrorCodeManifestInvalid) expectedCounts := map[errcode.ErrorCode]int{ v2.ErrorCodeManifestInvalid: 1, @@ -833,7 +910,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("error signing manifest: %v", err) } - resp = putManifest(t, "putting signed manifest with errors", manifestURL, sm) + resp = putManifest(t, "putting signed manifest with errors", manifestURL, "", sm) defer resp.Body.Close() checkResponse(t, "putting signed manifest with errors", resp, http.StatusBadRequest) _, p, counts = checkBodyHasErrorCodes(t, "putting signed manifest with errors", resp, @@ -866,7 +943,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) + uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) } @@ -877,20 +954,16 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("unexpected error signing manifest: %v", err) } - payload, err := signedManifest.Payload() - checkErr(t, err, "getting manifest payload") - - dgst, err := digest.FromBytes(payload) - checkErr(t, err, "digesting manifest") - - args.signedManifest = signedManifest + dgst := digest.FromBytes(signedManifest.Canonical) + args.manifest = signedManifest args.dgst = dgst - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") - resp = putManifest(t, "putting signed manifest", manifestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "putting signed manifest no error", manifestURL, "", signedManifest) + checkResponse(t, "putting signed manifest no error", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, @@ -898,7 +971,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m // -------------------- // Push by digest -- should get same result - resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + resp = putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, @@ -921,11 +994,12 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m var fetchedManifest schema1.SignedManifest dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } - if !bytes.Equal(fetchedManifest.Raw, signedManifest.Raw) { + if !bytes.Equal(fetchedManifest.Canonical, signedManifest.Canonical) { t.Fatalf("manifests do not match") } @@ -947,10 +1021,62 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("error decoding fetched manifest: %v", err) } - if !bytes.Equal(fetchedManifestByDigest.Raw, signedManifest.Raw) { + if 
!bytes.Equal(fetchedManifestByDigest.Canonical, signedManifest.Canonical) { t.Fatalf("manifests do not match") } + // check signature was roundtripped + signatures, err := fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 1 { + t.Fatalf("expected 1 signature from manifest, got: %d", len(signatures)) + } + + // Re-sign, push and pull the same digest + sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk) + if err != nil { + t.Fatal(err) + + } + + // Re-push with a few different Content-Types. The official schema1 + // content type should work, as should application/json with/without a + // charset. + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeSignedManifest, sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) + checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) + + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "re-fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "re-fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + // check two signatures were roundtripped + signatures, err = fetchedManifestByDigest.Signatures() + if err != nil { + t.Fatal(err) + } + + if len(signatures) != 2 { + t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures)) + } + // Get by name with etag, gives 304 etag := resp.Header.Get("Etag") req, err := http.NewRequest("GET", manifestURL, nil) @@ -963,7 +1089,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("Error constructing request: %s", err) } - checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) // Get by digest with etag, gives 304 req, err = http.NewRequest("GET", manifestDigestURL, nil) @@ -976,7 +1102,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("Error constructing request: %s", err) } - checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) // Ensure that the tag is listed. 
resp, err = http.Get(tagsURL) @@ -985,8 +1111,7 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m } defer resp.Body.Close() - // Check that we get an unknown repository error when asking for tags - checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) + checkResponse(t, "getting tags", resp, http.StatusOK) dec = json.NewDecoder(resp.Body) var tagsResponse tagsAPIResponse @@ -995,8 +1120,8 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("unexpected error decoding error response: %v", err) } - if tagsResponse.Name != imageName { - t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName.Name()) } if len(tagsResponse.Tags) != 1 { @@ -1017,178 +1142,868 @@ func testManifestAPI(t *testing.T, env *testEnv, args manifestArgs) (*testEnv, m t.Fatalf("error signing manifest") } - resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, invalidSigned) + resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, "", invalidSigned) checkResponse(t, "putting invalid signed manifest", resp, http.StatusBadRequest) - return env, args + return args } -func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { - imageName := args.imageName - dgst := args.dgst - signedManifest := args.signedManifest - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) - // --------------- - // Delete by digest - resp, err := httpDelete(manifestDigestURL) - checkErr(t, err, "deleting manifest by digest") +func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { + tag := "schema2tag" + args := manifestArgs{ + imageName: imageName, + mediaType: schema2.MediaTypeManifest, + } - checkResponse(t, "deleting manifest", resp, http.StatusAccepted) - checkHeaders(t, resp, http.Header{ - "Content-Length": []string{"0"}, - }) + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } - // --------------- - // Attempt to fetch deleted manifest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching deleted manifest by digest") + // ----------------------------- + // Attempt to fetch the manifest + resp, err := http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error getting manifest: %v", err) + } defer resp.Body.Close() - checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) - - // --------------- - // Delete already deleted manifest by digest - resp, err = httpDelete(manifestDigestURL) - checkErr(t, err, "re-deleting manifest by digest") - - checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) + checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) - // -------------------- - // Re-upload manifest by digest - resp = putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) - checkResponse(t, "putting signed manifest", resp, http.StatusCreated) - checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, - "Docker-Content-Digest": []string{dgst.String()}, - }) + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != 
nil { + t.Fatalf("unexpected error building tags url: %v", err) + } - // --------------- - // Attempt to fetch re-uploaded deleted digest - resp, err = http.Get(manifestDigestURL) - checkErr(t, err, "fetching re-uploaded manifest by digest") + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } defer resp.Body.Close() - checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) - checkHeaders(t, resp, http.Header{ - "Docker-Content-Digest": []string{dgst.String()}, - }) - - // --------------- - // Attempt to delete an unknown manifest - unknownDigest := "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - unknownManifestDigestURL, err := env.builder.BuildManifestURL(imageName, unknownDigest) - checkErr(t, err, "building unknown manifest url") - - resp, err = httpDelete(unknownManifestDigestURL) - checkErr(t, err, "delting unknown manifest by digest") - checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) - -} - -type testEnv struct { - pk libtrust.PrivateKey - ctx context.Context - config configuration.Configuration - app *App - server *httptest.Server - builder *v2.URLBuilder -} + // Check that we get an unknown repository error when asking for tags + checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) + checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) -func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - "delete": configuration.Parameters{"enabled": deleteEnabled}, + // -------------------------------- + // Attempt to push manifest with missing config and missing layers + manifest := &schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, }, - Proxy: configuration.Proxy{ - RemoteURL: "http://example.com", + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:463434349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeLayer, + }, + { + Digest: "sha256:630923423623623423352523525237238023652897356239852383652aaaaaaa", + Size: 6863, + MediaType: schema2.MediaTypeLayer, + }, }, } - return newTestEnvWithConfig(t, &config) - -} + resp = putManifest(t, "putting missing config manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing config manifest", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing config manifest", resp, v2.ErrorCodeManifestBlobUnknown) -func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { - config := configuration.Configuration{ - Storage: configuration.Storage{ - "inmemory": configuration.Parameters{}, - "delete": configuration.Parameters{"enabled": deleteEnabled}, - }, + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 3, } - config.HTTP.Headers = headerConfig + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } - return newTestEnvWithConfig(t, &config) -} + // Push a config, and reference it in the manifest + sampleConfig := 
[]byte(`{ + "architecture": "amd64", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + } + ], + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + } + }`) + sampleConfigDigest := digest.FromBytes(sampleConfig) -func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { - ctx := context.Background() + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, sampleConfigDigest, uploadURLBase, bytes.NewReader(sampleConfig)) + manifest.Config.Digest = sampleConfigDigest + manifest.Config.Size = int64(len(sampleConfig)) - app := NewApp(ctx, config) - server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) - builder, err := v2.NewURLBuilderFromString(server.URL + config.HTTP.Prefix) + // The manifest should still be invalid, because its layer doesn't exist + resp = putManifest(t, "putting missing layer manifest", manifestURL, schema2.MediaTypeManifest, manifest) + defer resp.Body.Close() + checkResponse(t, "putting missing layer manifest", resp, http.StatusBadRequest) + _, p, counts = checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestBlobUnknown) - if err != nil { - t.Fatalf("error creating url builder: %v", err) + expectedCounts = map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 2, } - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key: %v", err) + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) } - return &testEnv{ - pk: pk, - ctx: ctx, - config: *config, - app: app, - server: server, - builder: builder, - } -} + // Push 2 random layers + expectedLayers := make(map[digest.Digest]io.ReadSeeker) + + for i := range manifest.Layers { + rs, dgstStr, err := testutil.CreateRandomTarFile() -func putManifest(t *testing.T, msg, url string, v interface{}) *http.Response { - var body []byte - if sm, ok := v.(*schema1.SignedManifest); ok { - body = sm.Raw - } else { - var err error - body, err = json.MarshalIndent(v, "", " ") if err != nil { - t.Fatalf("unexpected error marshaling %v: %v", v, err) + t.Fatalf("error creating random layer %d: %v", i, err) } - } + dgst := digest.Digest(dgstStr) - req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) - if err != nil { - t.Fatalf("error creating request for %s: %v", msg, err) - } + expectedLayers[dgst] = rs + manifest.Layers[i].Digest = dgst - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("error doing put request while %s: %v", msg, err) + uploadURLBase, _ := startPushLayer(t, env, imageName) + pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) } - return resp -} - -func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location string, uuid string) { - layerUploadURL, err := ub.BuildBlobUploadURL(name) + // ------------------- + // Push the manifest with all layers pushed. 
+ deserializedManifest, err := schema2.FromStruct(*manifest) if err != nil { - t.Fatalf("unexpected error building layer upload url: %v", err) + t.Fatalf("could not create DeserializedManifest: %v", err) } - - resp, err := http.Post(layerUploadURL, "", nil) + _, canonical, err := deserializedManifest.Payload() if err != nil { - t.Fatalf("unexpected error starting layer push: %v", err) + t.Fatalf("could not get manifest payload: %v", err) } - defer resp.Body.Close() + dgst := digest.FromBytes(canonical) + args.dgst = dgst + args.manifest = deserializedManifest - checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name), resp, http.StatusAccepted) + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") - u, err := url.Parse(resp.Header.Get("Location")) - if err != nil { + resp = putManifest(t, "putting manifest no error", manifestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest by digest", manifestDigestURL, schema2.MediaTypeManifest, manifest) + checkResponse(t, "putting manifest by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error fetching manifest: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifest schema2.DeserializedManifest + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestByDigest schema2.DeserializedManifest + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting manifest payload: %v", err) + } + + if 
!bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) + dec = json.NewDecoder(resp.Body) + + var tagsResponse tagsAPIResponse + + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest as schema1: %v", err) + } + defer resp.Body.Close() + + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + + checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) + + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName.Name() { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + for i := range manifest.Layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != manifest.Layers[len(manifest.Layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields because we're using randomly-generated + // layers. 
+ + return args +} + +func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + tag := "manifestlisttag" + + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) + if err != nil { + t.Fatalf("unexpected error getting manifest url: %v", err) + } + + // -------------------------------- + // Attempt to push manifest list that refers to an unknown manifest + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "amd64", + OS: "linux", + }, + }, + }, + } + + resp := putManifest(t, "putting missing manifest manifestlist", manifestURL, manifestlist.MediaTypeManifestList, manifestList) + defer resp.Body.Close() + checkResponse(t, "putting missing manifest manifestlist", resp, http.StatusBadRequest) + _, p, counts := checkBodyHasErrorCodes(t, "putting missing manifest manifestlist", resp, v2.ErrorCodeManifestBlobUnknown) + + expectedCounts := map[errcode.ErrorCode]int{ + v2.ErrorCodeManifestBlobUnknown: 1, + } + + if !reflect.DeepEqual(counts, expectedCounts) { + t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) + } + + // ------------------- + // Push a manifest list that references an actual manifest + manifestList.Manifests[0].Digest = args.dgst + deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) + if err != nil { + t.Fatalf("could not create DeserializedManifestList: %v", err) + } + _, canonical, err := deserializedManifestList.Payload() + if err != nil { + t.Fatalf("could not get manifest list payload: %v", err) + } + dgst := digest.FromBytes(canonical) + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp = putManifest(t, "putting manifest list no error", manifestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list no error", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // -------------------- + // Push by digest -- should get same result + resp = putManifest(t, "putting manifest list by digest", manifestDigestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) + checkResponse(t, "putting manifest list by digest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // ------------------ + // Fetch by tag name + req, err := http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", manifestlist.MediaTypeManifestList) + req.Header.Add("Accept", schema1.MediaTypeSignedManifest) + req.Header.Add("Accept", schema2.MediaTypeManifest) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error fetching manifest list: %v", err) + } + defer resp.Body.Close() + + checkResponse(t, 
"fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestList manifestlist.DeserializedManifestList + dec := json.NewDecoder(resp.Body) + + if err := dec.Decode(&fetchedManifestList); err != nil { + t.Fatalf("error decoding fetched manifest list: %v", err) + } + + _, fetchedCanonical, err := fetchedManifestList.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifest lists do not match") + } + + // --------------- + // Fetch by digest + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("Accept", manifestlist.MediaTypeManifestList) + resp, err = http.DefaultClient.Do(req) + checkErr(t, err, "fetching manifest list by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, + }) + + var fetchedManifestListByDigest manifestlist.DeserializedManifestList + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&fetchedManifestListByDigest); err != nil { + t.Fatalf("error decoding fetched manifest: %v", err) + } + + _, fetchedCanonical, err = fetchedManifestListByDigest.Payload() + if err != nil { + t.Fatalf("error getting manifest list payload: %v", err) + } + + if !bytes.Equal(fetchedCanonical, canonical) { + t.Fatalf("manifests do not match") + } + + // Get by name with etag, gives 304 + etag := resp.Header.Get("Etag") + req, err = http.NewRequest("GET", manifestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) + + // Get by digest with etag, gives 304 + req, err = http.NewRequest("GET", manifestDigestURL, nil) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + req.Header.Set("If-None-Match", etag) + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Error constructing request: %s", err) + } + + checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) + + // ------------------ + // Fetch as a schema1 manifest + resp, err = http.Get(manifestURL) + if err != nil { + t.Fatalf("unexpected error fetching manifest list as schema1: %v", err) + } + defer resp.Body.Close() + + manifestBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + + checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) + + m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, manifestBytes) + if err != nil { + t.Fatalf("unexpected error unmarshalling manifest: %v", err) + } + + fetchedSchema1Manifest, ok := m.(*schema1.SignedManifest) + if !ok { + t.Fatalf("expecting schema1 manifest") + } + + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{desc.Digest.String()}, + "ETag": []string{fmt.Sprintf(`"%s"`, desc.Digest)}, + }) + + if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { + t.Fatal("wrong schema 
version") + } + if fetchedSchema1Manifest.Architecture != "amd64" { + t.Fatal("wrong architecture") + } + if fetchedSchema1Manifest.Name != imageName.Name() { + t.Fatal("wrong image name") + } + if fetchedSchema1Manifest.Tag != tag { + t.Fatal("wrong tag") + } + if len(fetchedSchema1Manifest.FSLayers) != 2 { + t.Fatal("wrong number of FSLayers") + } + layers := args.manifest.(*schema2.DeserializedManifest).Layers + for i := range layers { + if fetchedSchema1Manifest.FSLayers[i].BlobSum != layers[len(layers)-i-1].Digest { + t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) + } + } + if len(fetchedSchema1Manifest.History) != 2 { + t.Fatal("wrong number of History entries") + } + + // Don't check V1Compatibility fields because we're using randomly-generated + // layers. +} + +func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { + imageName := args.imageName + dgst := args.dgst + manifest := args.manifest + + ref, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := env.builder.BuildManifestURL(ref) + // --------------- + // Delete by digest + resp, err := httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // --------------- + // Attempt to fetch deleted manifest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching deleted manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + + // --------------- + // Delete already deleted manifest by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "re-deleting manifest by digest") + + checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) + + // -------------------- + // Re-upload manifest by digest + resp = putManifest(t, "putting manifest", manifestDigestURL, args.mediaType, manifest) + checkResponse(t, "putting manifest", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": []string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to fetch re-uploaded deleted digest + resp, err = http.Get(manifestDigestURL) + checkErr(t, err, "fetching re-uploaded manifest by digest") + defer resp.Body.Close() + + checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // --------------- + // Attempt to delete an unknown manifest + unknownDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + unknownRef, _ := reference.WithDigest(imageName, unknownDigest) + unknownManifestDigestURL, err := env.builder.BuildManifestURL(unknownRef) + checkErr(t, err, "building unknown manifest url") + + resp, err = httpDelete(unknownManifestDigestURL) + checkErr(t, err, "delting unknown manifest by digest") + checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) + + // -------------------- + // Upload manifest by tag + tag := "atag" + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := env.builder.BuildManifestURL(tagRef) + resp = putManifest(t, "putting manifest by tag", manifestTagURL, args.mediaType, manifest) + checkResponse(t, "putting manifest by tag", resp, http.StatusCreated) + checkHeaders(t, resp, http.Header{ + "Location": 
[]string{manifestDigestURL}, + "Docker-Content-Digest": []string{dgst.String()}, + }) + + tagsURL, err := env.builder.BuildTagsURL(imageName) + if err != nil { + t.Fatalf("unexpected error building tags url: %v", err) + } + + // Ensure that the tag is listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec := json.NewDecoder(resp.Body) + var tagsResponse tagsAPIResponse + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 1 { + t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) + } + + if tagsResponse.Tags[0] != tag { + t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) + } + + // --------------- + // Delete by digest + resp, err = httpDelete(manifestDigestURL) + checkErr(t, err, "deleting manifest by digest") + + checkResponse(t, "deleting manifest with tag", resp, http.StatusAccepted) + checkHeaders(t, resp, http.Header{ + "Content-Length": []string{"0"}, + }) + + // Ensure that the tag is not listed. + resp, err = http.Get(tagsURL) + if err != nil { + t.Fatalf("unexpected error getting unknown tags: %v", err) + } + defer resp.Body.Close() + + dec = json.NewDecoder(resp.Body) + if err := dec.Decode(&tagsResponse); err != nil { + t.Fatalf("unexpected error decoding error response: %v", err) + } + + if tagsResponse.Name != imageName.Name() { + t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) + } + + if len(tagsResponse.Tags) != 0 { + t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags) + } + +} + +type testEnv struct { + pk libtrust.PrivateKey + ctx context.Context + config configuration.Configuration + app *App + server *httptest.Server + builder *v2.URLBuilder +} + +func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + }, + Proxy: configuration.Proxy{ + RemoteURL: "http://example.com", + }, + } + + return newTestEnvWithConfig(t, &config) + +} + +func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + "delete": configuration.Parameters{"enabled": deleteEnabled}, + }, + } + + config.HTTP.Headers = headerConfig + + return newTestEnvWithConfig(t, &config) +} + +func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { + ctx := context.Background() + + app := NewApp(ctx, config) + server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) + builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false) + + if err != nil { + t.Fatalf("error creating url builder: %v", err) + } + + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key: %v", err) + } + + return &testEnv{ + pk: pk, + ctx: ctx, + config: *config, + app: app, + server: server, + builder: builder, + } +} + +func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *http.Response { + var body []byte + + switch m := v.(type) { + case *schema1.SignedManifest: + _, 
pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + case *manifestlist.DeserializedManifestList: + _, pl, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload: %v", err) + } + body = pl + default: + var err error + body, err = json.MarshalIndent(v, "", " ") + if err != nil { + t.Fatalf("unexpected error marshaling %v: %v", v, err) + } + } + + req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) + if err != nil { + t.Fatalf("error creating request for %s: %v", msg, err) + } + + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("error doing put request while %s: %v", msg, err) + } + + return resp +} + +func startPushLayer(t *testing.T, env *testEnv, name reference.Named) (location string, uuid string) { + layerUploadURL, err := env.builder.BuildBlobUploadURL(name) + if err != nil { + t.Fatalf("unexpected error building layer upload url: %v", err) + } + + u, err := url.Parse(layerUploadURL) + if err != nil { + t.Fatalf("error parsing layer upload URL: %v", err) + } + + base, err := url.Parse(env.server.URL) + if err != nil { + t.Fatalf("error parsing server URL: %v", err) + } + + layerUploadURL = base.ResolveReference(u).String() + resp, err := http.Post(layerUploadURL, "", nil) + if err != nil { + t.Fatalf("unexpected error starting layer push: %v", err) + } + + defer resp.Body.Close() + + checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name.String()), resp, http.StatusAccepted) + + u, err = url.Parse(resp.Header.Get("Location")) + if err != nil { t.Fatalf("error parsing location header: %v", err) } @@ -1204,7 +2019,7 @@ func startPushLayer(t *testing.T, ub *v2.URLBuilder, name string) (location stri // doPushLayer pushes the layer content returning the url on success returning // the response. If you're only expecting a successful response, use pushLayer. -func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { +func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { u, err := url.Parse(uploadURLBase) if err != nil { t.Fatalf("unexpected error parsing pushLayer url: %v", err) @@ -1212,7 +2027,6 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges u.RawQuery = url.Values{ "_state": u.Query()["_state"], - "digest": []string{dgst.String()}, }.Encode() @@ -1228,7 +2042,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges } // pushLayer pushes the layer content returning the url on success. 
-func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string { +func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string { digester := digest.Canonical.New() resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) @@ -1245,7 +2059,8 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, sha256Dgst := digester.Digest() - expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst) + ref, _ := reference.WithDigest(name, sha256Dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) if err != nil { t.Fatalf("error building expected layer url: %v", err) } @@ -1259,7 +2074,7 @@ func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, return resp.Header.Get("Location") } -func finishUpload(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, dgst digest.Digest) string { +func finishUpload(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, dgst digest.Digest) string { resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) @@ -1268,7 +2083,8 @@ func finishUpload(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase st checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) - expectedLayerURL, err := ub.BuildBlobURL(name, dgst) + ref, _ := reference.WithDigest(name, dgst) + expectedLayerURL, err := ub.BuildBlobURL(ref) if err != nil { t.Fatalf("error building expected layer url: %v", err) } @@ -1307,7 +2123,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp return resp, digester.Digest(), err } -func pushChunk(t *testing.T, ub *v2.URLBuilder, name string, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { +func pushChunk(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { resp, dgst, err := doPushChunk(t, uploadURLBase, body) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) @@ -1442,7 +2258,12 @@ func checkErr(t *testing.T, err error, msg string) { } } -func createRepository(env *testEnv, t *testing.T, imageName string, tag string) { +func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { + imageNameRef, err := reference.ParseNamed(imageName) + if err != nil { + t.Fatalf("unable to parse reference: %v", err) + } + unsignedManifest := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, @@ -1466,7 +2287,6 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) for i := range unsignedManifest.FSLayers { rs, dgstStr, err := testutil.CreateRandomTarFile() - if err != nil { t.Fatalf("error creating random layer %d: %v", i, err) } @@ -1474,9 +2294,8 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst - - uploadURLBase, _ := startPushLayer(t, env.builder, imageName) - pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) + uploadURLBase, _ := startPushLayer(t, env, imageNameRef) + pushLayer(t, env.builder, imageNameRef, dgst, uploadURLBase, rs) } signedManifest, err := schema1.Sign(unsignedManifest, env.pk) @@ -1484,21 +2303,24 @@ func createRepository(env *testEnv, t 
*testing.T, imageName string, tag string) t.Fatalf("unexpected error signing manifest: %v", err) } - payload, err := signedManifest.Payload() - checkErr(t, err, "getting manifest payload") - - dgst, err := digest.FromBytes(payload) - checkErr(t, err, "digesting manifest") + dgst := digest.FromBytes(signedManifest.Canonical) - manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String()) + // Create this repository by tag to ensure the tag mapping is made in the registry + tagRef, _ := reference.WithTag(imageNameRef, tag) + manifestDigestURL, err := env.builder.BuildManifestURL(tagRef) checkErr(t, err, "building manifest url") - resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest) + digestRef, _ := reference.WithDigest(imageNameRef, dgst) + location, err := env.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building location URL") + + resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ - "Location": []string{manifestDigestURL}, + "Location": []string{location}, "Docker-Content-Digest": []string{dgst.String()}, }) + return dgst } // Test mutation operations on a registry configured as a cache. Ensure that they return @@ -1507,9 +2329,10 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { deleteEnabled := true env := newTestEnvMirror(t, deleteEnabled) - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") tag := "latest" - manifestURL, err := env.builder.BuildManifestURL(imageName, tag) + tagRef, _ := reference.WithTag(imageName, tag) + manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error building base url: %v", err) } @@ -1519,7 +2342,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: imageName, + Name: imageName.Name(), Tag: tag, FSLayers: []schema1.FSLayer{}, History: []schema1.History{}, @@ -1530,7 +2353,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { t.Fatalf("error signing manifest: %v", err) } - resp := putManifest(t, "putting unsigned manifest", manifestURL, sm) + resp := putManifest(t, "putting unsigned manifest", manifestURL, "", sm) checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Manifest Delete @@ -1552,7 +2375,8 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Blob Delete - blobURL, err := env.builder.BuildBlobURL(imageName, digest.DigestSha256EmptyTar) + ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) + blobURL, err := env.builder.BuildBlobURL(ref) resp, err = httpDelete(blobURL) checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) @@ -1585,3 +2409,66 @@ func TestCheckContextNotifier(t *testing.T) { t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode) } } + +func TestProxyManifestGetByTag(t *testing.T) { + truthConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + } + truthConfig.HTTP.Headers = headerConfig + + imageName, _ := reference.ParseNamed("foo/bar") + tag := "latest" + + truthEnv := newTestEnvWithConfig(t, &truthConfig) + // create a repository in 
the truth registry + dgst := createRepository(truthEnv, t, imageName.Name(), tag) + + proxyConfig := configuration.Configuration{ + Storage: configuration.Storage{ + "inmemory": configuration.Parameters{}, + }, + Proxy: configuration.Proxy{ + RemoteURL: truthEnv.server.URL, + }, + } + proxyConfig.HTTP.Headers = headerConfig + + proxyEnv := newTestEnvWithConfig(t, &proxyConfig) + + digestRef, _ := reference.WithDigest(imageName, dgst) + manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(digestRef) + checkErr(t, err, "building manifest url") + + resp, err := http.Get(manifestDigestURL) + checkErr(t, err, "fetching manifest from proxy by digest") + defer resp.Body.Close() + + tagRef, _ := reference.WithTag(imageName, tag) + manifestTagURL, err := proxyEnv.builder.BuildManifestURL(tagRef) + checkErr(t, err, "building manifest url") + + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{dgst.String()}, + }) + + // Create another manifest in the remote with the same image/tag pair + newDigest := createRepository(truthEnv, t, imageName.Name(), tag) + if dgst == newDigest { + t.Fatalf("non-random test data") + } + + // fetch it with the same proxy URL as before. Ensure the updated content is at the same tag + resp, err = http.Get(manifestTagURL) + checkErr(t, err, "fetching manifest from proxy by tag") + defer resp.Body.Close() + checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkHeaders(t, resp, http.Header{ + "Docker-Content-Digest": []string{newDigest.String()}, + }) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go index 6a5a9c44be52..4f67c0321281 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app.go @@ -9,6 +9,7 @@ import ( "net/http" "net/url" "os" + "runtime" "time" log "github.com/Sirupsen/logrus" @@ -18,6 +19,7 @@ import ( "github.com/docker/distribution/health" "github.com/docker/distribution/health/checks" "github.com/docker/distribution/notifications" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" @@ -30,6 +32,8 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/distribution/version" + "github.com/docker/libtrust" "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" "golang.org/x/net/context" @@ -67,22 +71,27 @@ type App struct { redis *redis.Pool - // true if this registry is configured as a pull through cache + // trustKey is a deprecated key used to sign manifests converted to + // schema1 for backward compatibility. It should not be used for any + // other purposes. 
+ trustKey libtrust.PrivateKey + + // isCache is true if this registry is configured as a pull through cache isCache bool - // true if the registry is in a read-only maintenance mode + // readOnly is true if the registry is in a read-only maintenance mode readOnly bool } // NewApp takes a configuration and returns a configured app, ready to serve // requests. The app only implements ServeHTTP and can be wrapped in other // handlers accordingly. -func NewApp(ctx context.Context, configuration *configuration.Configuration) *App { +func NewApp(ctx context.Context, config *configuration.Configuration) *App { app := &App{ - Config: configuration, + Config: config, Context: ctx, - router: v2.RouterWithPrefix(configuration.HTTP.Prefix), - isCache: configuration.Proxy.RemoteURL != "", + router: v2.RouterWithPrefix(config.HTTP.Prefix), + isCache: config.Proxy.RemoteURL != "", } // Register the handler dispatchers. @@ -96,8 +105,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) + // override the storage driver's UA string for registry outbound HTTP requests + storageParams := config.Storage.Parameters() + if storageParams == nil { + storageParams = make(configuration.Parameters) + } + storageParams["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version()) + var err error - app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) + app.driver, err = factory.Create(config.Storage.Type(), storageParams) if err != nil { // TODO(stevvooe): Move the creation of a service into a protected // method, where this is created lazily. Its status can be queried via @@ -106,7 +122,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } purgeConfig := uploadPurgeDefaultConfig() - if mc, ok := configuration.Storage["maintenance"]; ok { + if mc, ok := config.Storage["maintenance"]; ok { if v, ok := mc["uploadpurging"]; ok { purgeConfig, ok = v.(map[interface{}]interface{}) if !ok { @@ -129,43 +145,62 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) - app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) + app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"]) if err != nil { panic(err) } - app.configureSecret(configuration) - app.configureEvents(configuration) - app.configureRedis(configuration) - app.configureLogHook(configuration) + app.configureSecret(config) + app.configureEvents(config) + app.configureRedis(config) + app.configureLogHook(config) - if configuration.HTTP.Host != "" { - u, err := url.Parse(configuration.HTTP.Host) + if config.Compatibility.Schema1.TrustKey != "" { + app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey) + if err != nil { + panic(fmt.Sprintf(`could not load schema1 "signingkey" parameter: %v`, err)) + } + } else { + // Generate an ephemeral key to be used for signing converted manifests + // for clients that don't support schema2. 
+ app.trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + } + + if config.HTTP.Host != "" { + u, err := url.Parse(config.HTTP.Host) if err != nil { panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) } app.httpHost = *u } - options := []storage.RegistryOption{} + options := registrymiddleware.GetRegistryOptions() if app.isCache { options = append(options, storage.DisableDigestResumption) } + if config.Compatibility.Schema1.DisableSignatureStore { + options = append(options, storage.DisableSchema1Signatures) + options = append(options, storage.Schema1SigningKey(app.trustKey)) + } + // configure deletion - if d, ok := configuration.Storage["delete"]; ok { + if d, ok := config.Storage["delete"]; ok { e, ok := d["enabled"] if ok { if deleteEnabled, ok := e.(bool); ok && deleteEnabled { - options = append(options, storage.EnableDelete, storage.RemoveParentsOnDelete) + options = append(options, storage.EnableDelete) } } } // configure redirects var redirectDisabled bool - if redirectConfig, ok := configuration.Storage["redirect"]; ok { + if redirectConfig, ok := config.Storage["redirect"]; ok { v := redirectConfig["disable"] switch v := v.(type) { case bool: @@ -181,7 +216,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } // configure storage caches - if cc, ok := configuration.Storage["cache"]; ok { + if cc, ok := config.Storage["cache"]; ok { v, ok := cc["blobdescriptor"] if !ok { // Backwards compatible: "layerinfo" == "blobdescriptor" @@ -210,7 +245,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { - ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) + ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"]) } } } @@ -223,15 +258,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } } - app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) + app.registry, err = applyRegistryMiddleware(app, app.registry, config.Middleware["registry"]) if err != nil { panic(err) } - authType := configuration.Auth.Type() + authType := config.Auth.Type() if authType != "" { - accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) + accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters()) if err != nil { panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) } @@ -240,13 +275,13 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap } // configure as a pull through cache - if configuration.Proxy.RemoteURL != "" { - app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) + if config.Proxy.RemoteURL != "" { + app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy) if err != nil { panic(err.Error()) } app.isCache = true - ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) + ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL) } return app @@ -356,11 +391,6 @@ func (app *App) register(routeName string, dispatch dispatchFunc) { app.RegisterRoute(app.router.GetRoute(routeName), dispatch, app.nameRequired, 
NoCustomAccessRecords) } -// Namespace returns a namespace instance representing application's registry storage. -func (app *App) Namespace() distribution.Namespace { - return app.registry -} - func (app *App) RegisterRoute(route *mux.Route, dispatch dispatchFunc, nameRequired nameRequiredFunc, accessRecords customAccessRecordsFunc) { // TODO(stevvooe): This odd dispatcher/route registration is by-product of // some limitations in the gorilla/mux router. We are using it to keep @@ -395,7 +425,7 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { sinks = append(sinks, endpoint) } - // NOTE(stevvooe): Moving to a new queueing implementation is as easy as + // NOTE(stevvooe): Moving to a new queuing implementation is as easy as // replacing broadcaster with a rabbitmq implementation. It's recommended // that the registry instances also act as the workers to keep deployment // simple. @@ -599,10 +629,22 @@ func (app *App) dispatcher(dispatch dispatchFunc, nameRequired nameRequiredFunc, } // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) + context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey)) if nameRequired(r) { - repository, err := app.registry.Repository(context, getName(context)) + nameRef, err := reference.ParseNamed(getName(context)) + if err != nil { + ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err) + context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ + Name: getName(context), + Reason: err, + }) + if err := errcode.ServeJSON(w, context.Errors); err != nil { + ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + } + return + } + repository, err := app.registry.Repository(context, nameRef) if err != nil { ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) @@ -625,7 +667,7 @@ func (app *App) dispatcher(dispatch dispatchFunc, nameRequired nameRequiredFunc, repository, app.eventBridge(context, r)) - context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) + context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"]) if err != nil { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) @@ -699,9 +741,9 @@ func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { // A "host" item in the configuration takes precedence over // X-Forwarded-Proto and X-Forwarded-Host headers, and the // hostname in the request. - context.urlBuilder = v2.NewURLBuilder(&app.httpHost) + context.urlBuilder = v2.NewURLBuilder(&app.httpHost, false) } else { - context.urlBuilder = v2.NewURLBuilderFromRequest(r) + context.urlBuilder = v2.NewURLBuilderFromRequest(r, app.Config.HTTP.RelativeURLs) } return context @@ -723,8 +765,12 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont if repo != "" { accessRecords = appendAccessRecords(accessRecords, r.Method, repo) + if fromRepo := r.FormValue("from"); fromRepo != "" { + // mounting a blob from one repository to another requires pull (GET) + // access to the source repository. 
+ accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo) + } } - if len(accessRecords) == 0 { // Only allow the name not to be set on the base route. if nameRequired(r) { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go index 9e2514d8e6e9..caa7ab97e7b5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/app_test.go @@ -48,8 +48,8 @@ func TestAppDispatcher(t *testing.T) { varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { return func(ctx *Context, r *http.Request) http.Handler { // Always checks the same name context - if ctx.Repository.Name() != getName(ctx) { - t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar") + if ctx.Repository.Named().Name() != getName(ctx) { + t.Fatalf("unexpected name: %q != %q", ctx.Repository.Named().Name(), "foo/bar") } // Check that we have all that is expected @@ -102,13 +102,6 @@ func TestAppDispatcher(t *testing.T) { "name", "foo/bar", }, }, - { - endpoint: v2.RouteNameBlob, - vars: []string{ - "name", "foo/bar", - "digest", "tarsum.v1+bogus:abcdef0123456789", - }, - }, { endpoint: v2.RouteNameBlobUpload, vars: []string{ @@ -167,7 +160,7 @@ func TestNewApp(t *testing.T) { app := NewApp(ctx, &config) server := httptest.NewServer(app) - builder, err := v2.NewURLBuilderFromString(server.URL) + builder, err := v2.NewURLBuilderFromString(server.URL, false) if err != nil { t.Fatalf("error creating urlbuilder: %v", err) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go index 9cf6f6d1d5d0..673e2c591b17 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/blobupload.go @@ -4,13 +4,14 @@ import ( "fmt" "net/http" "net/url" - "os" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) @@ -44,9 +45,9 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } buh.State = state - if state.Name != ctx.Repository.Name() { + if state.Name != ctx.Repository.Named().Name() { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) + ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name()) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } @@ -74,28 +75,14 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { } buh.Upload = upload - if state.Offset > 0 { - // Seek the blob upload to the correct spot if it's non-zero. - // These error conditions should be rare and demonstrate really - // problems. We basically cancel the upload and tell the client to - // start over. 
- if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - upload.Cancel(buh) - }) - } else if nn != buh.State.Offset { - defer upload.Close() - ctxu.GetLogger(ctx).Infof("seek to wrong offest: %d != %d", nn, buh.State.Offset) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) - upload.Cancel(buh) - }) - } + if size := upload.Size(); size != buh.State.Offset { + defer upload.Close() + ctxu.GetLogger(ctx).Infof("upload resumed at wrong offset: %d != %d", size, buh.State.Offset) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) + upload.Cancel(buh) + }) } - return closeResources(handler, buh.Upload) } @@ -116,13 +103,29 @@ type blobUploadHandler struct { } // StartBlobUpload begins the blob upload process and allocates a server-side -// blob writer session. +// blob writer session, optionally mounting the blob from a separate repository. func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { + var options []distribution.BlobCreateOption + + fromRepo := r.FormValue("from") + mountDigest := r.FormValue("mount") + + if mountDigest != "" && fromRepo != "" { + opt, err := buh.createBlobMountOption(fromRepo, mountDigest) + if opt != nil && err == nil { + options = append(options, opt) + } + } + blobs := buh.Repository.Blobs(buh) - upload, err := blobs.Create(buh) + upload, err := blobs.Create(buh, options...) if err != nil { - if err == distribution.ErrUnsupported { + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + } else if err == distribution.ErrUnsupported { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) } else { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) @@ -221,10 +224,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - size := buh.State.Offset - if offset, err := buh.Upload.Seek(0, os.SEEK_CUR); err == nil { - size = offset - } + size := buh.Upload.Size() desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ Digest: dgst, @@ -262,18 +262,10 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - - // Build our canonical blob url - blobURL, err := buh.urlBuilder.BuildBlobURL(buh.Repository.Name(), desc.Digest) - if err != nil { + if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } - - w.Header().Set("Location", blobURL) - w.Header().Set("Content-Length", "0") - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - w.WriteHeader(http.StatusCreated) } // CancelBlobUpload cancels an in-progress upload of a blob. @@ -298,21 +290,10 @@ func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Re // uploads always start at a 0 offset. This allows disabling resumable push by // always returning a 0 offset on check status.
func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { - - var offset int64 - if !fresh { - var err error - offset, err = buh.Upload.Seek(0, os.SEEK_CUR) - if err != nil { - ctxu.GetLogger(buh).Errorf("unable get current offset of blob upload: %v", err) - return err - } - } - // TODO(stevvooe): Need a better way to manage the upload state automatically. - buh.State.Name = buh.Repository.Name() + buh.State.Name = buh.Repository.Named().Name() buh.State.UUID = buh.Upload.ID() - buh.State.Offset = offset + buh.State.Offset = buh.Upload.Size() buh.State.StartedAt = buh.Upload.StartedAt() token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) @@ -322,7 +303,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. } uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( - buh.Repository.Name(), buh.Upload.ID(), + buh.Repository.Named(), buh.Upload.ID(), url.Values{ "_state": []string{token}, }) @@ -331,15 +312,58 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. return err } - endRange := offset + endRange := buh.Upload.Size() if endRange > 0 { endRange = endRange - 1 } w.Header().Set("Docker-Upload-UUID", buh.UUID) w.Header().Set("Location", uploadURL) + w.Header().Set("Content-Length", "0") w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) return nil } + +// mountBlob attempts to mount a blob from another repository by its digest. If +// successful, the blob is linked into the blob store and 201 Created is +// returned with the canonical url of the blob. +func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { + dgst, err := digest.ParseDigest(mountDigest) + if err != nil { + return nil, err + } + + ref, err := reference.ParseNamed(fromRepo) + if err != nil { + return nil, err + } + + canonical, err := reference.WithDigest(ref, dgst) + if err != nil { + return nil, err + } + + return storage.WithMountFrom(canonical), nil +} + +// writeBlobCreatedHeaders writes the standard headers describing a newly +// created blob. A 201 Created is written as well as the canonical URL and +// blob digest. +func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { + ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest) + if err != nil { + return err + } + blobURL, err := buh.urlBuilder.BuildBlobURL(ref) + if err != nil { + return err + } + + w.Header().Set("Location", blobURL) + w.Header().Set("Content-Length", "0") + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + w.WriteHeader(http.StatusCreated) + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go index 85a171237595..552db2df627c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/context.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" "golang.org/x/net/context" ) @@ -77,7 +78,7 @@ func getUploadUUID(ctx context.Context) (uuid string) { // getUserName attempts to resolve a username from the context and request. 
If // a username cannot be resolved, the empty string is returned. func getUserName(ctx context.Context, r *http.Request) string { - username := ctxu.GetStringValue(ctx, "auth.user.name") + username := ctxu.GetStringValue(ctx, auth.UserNameKey) // Fallback to request user with basic auth if username == "" { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go index 5a3c99841831..b56c15668ab4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/helpers.go @@ -35,7 +35,7 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr // Read in the data, if any. copied, err := io.Copy(destWriter, r.Body) if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { - // Didn't recieve as much content as expected. Did the client + // Didn't receive as much content as expected. Did the client // disconnect during the request? If so, avoid returning a 400 // error to keep the logs cleaner. select { @@ -46,7 +46,11 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr // instead of showing 0 for the HTTP status. responseWriter.WriteHeader(499) - ctxu.GetLogger(context).Error("client disconnected during " + action) + ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{ + "error": err, + "copied": copied, + "contentLength": r.ContentLength, + }, "error", "copied", "contentLength").Error("client disconnected during " + action) return errors.New("client disconnected") default: } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go index 3720c7bae993..5f2d885592ed 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/images.go @@ -2,19 +2,26 @@ package handlers import ( "bytes" - "encoding/json" "fmt" "net/http" - "strings" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" - "golang.org/x/net/context" +) + +// These constants determine which architecture and OS to choose from a +// manifest list when downconverting it to a schema1 manifest. 
+const ( + defaultArch = "amd64" + defaultOS = "linux" ) // imageManifestDispatcher takes the request context and builds the @@ -33,7 +40,8 @@ func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { } mhandler := handlers.MethodHandler{ - "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), + "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), } if !ctx.readOnly { @@ -62,42 +70,139 @@ func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http return } - var sm *schema1.SignedManifest + var manifest distribution.Manifest if imh.Tag != "" { - sm, err = manifests.GetByTag(imh.Tag) - } else { - if etagMatch(r, imh.Digest.String()) { - w.WriteHeader(http.StatusNotModified) + tags := imh.Repository.Tags(imh) + desc, err := tags.Get(imh, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } - sm, err = manifests.Get(imh.Digest) + imh.Digest = desc.Digest } + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + manifest, err = manifests.Get(imh, imh.Digest, options...) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } - // Get the digest, if we don't already have it. - if imh.Digest == "" { - dgst, err := digestManifest(imh, sm) + supportsSchema2 := false + supportsManifestList := false + if acceptHeaders, ok := r.Header["Accept"]; ok { + for _, mediaType := range acceptHeaders { + if mediaType == schema2.MediaTypeManifest { + supportsSchema2 = true + } + if mediaType == manifestlist.MediaTypeManifestList { + supportsManifestList = true + } + } + } + + schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) + manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) + + // Only rewrite schema2 manifests when they are being fetched by tag. + // If they are being fetched by digest, we can't return something not + // matching the digest. 
+ if imh.Tag != "" && isSchema2 && !supportsSchema2 { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + + manifest, err = imh.convertSchema2Manifest(schema2Manifest) if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) return } - if etagMatch(r, dgst.String()) { - w.WriteHeader(http.StatusNotModified) + } else if imh.Tag != "" && isManifestList && !supportsManifestList { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + + // Find the image manifest corresponding to the default + // platform + var manifestDigest digest.Digest + for _, manifestDescriptor := range manifestList.Manifests { + if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + } + + manifest, err = manifests.Get(imh, manifestDigest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } - imh.Digest = dgst + // If necessary, convert the image manifest + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.Header().Set("Content-Length", fmt.Sprint(len(sm.Raw))) + ct, p, err := manifest.Payload() + if err != nil { + return + } + + w.Header().Set("Content-Type", ct) + w.Header().Set("Content-Length", fmt.Sprint(len(p))) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) - w.Write(sm.Raw) + w.Write(p) +} + +func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { + targetDescriptor := schema2Manifest.Target() + blobs := imh.Repository.Blobs(imh) + configJSON, err := blobs.Get(imh, targetDescriptor.Digest) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + + ref := imh.Repository.Named() + + if imh.Tag != "" { + ref, err = reference.WithTag(ref, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err)) + return nil, err + } + } + + builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) + for _, d := range schema2Manifest.References() { + if err := builder.AppendReference(d); err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + } + manifest, err := builder.Build(imh) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) + return nil, err + } + imh.Digest = digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical) + + return manifest, nil } func etagMatch(r *http.Request, etag string) bool { @@ -109,7 +214,7 @@ func etagMatch(r *http.Request, etag string) bool { return false } -// PutImageManifest validates and stores and image in the registry. +// PutImageManifest validates and stores an image in the registry. 
func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("PutImageManifest") manifests, err := imh.Repository.Manifests(imh) @@ -124,39 +229,32 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } - var manifest schema1.SignedManifest - if err := json.Unmarshal(jsonBuf.Bytes(), &manifest); err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) - return - } - - dgst, err := digestManifest(imh, &manifest) + mediaType := r.Header.Get("Content-Type") + manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes()) if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return } - // Validate manifest tag or digest matches payload - if imh.Tag != "" { - if manifest.Tag != imh.Tag { - ctxu.GetLogger(imh).Errorf("invalid tag on manifest payload: %q != %q", manifest.Tag, imh.Tag) - imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid) - return - } - - imh.Digest = dgst - } else if imh.Digest != "" { - if dgst != imh.Digest { - ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", dgst, imh.Digest) + if imh.Digest != "" { + if desc.Digest != imh.Digest { + ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", desc.Digest, imh.Digest) imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) return } + } else if imh.Tag != "" { + imh.Digest = desc.Digest } else { imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) return } - if err := manifests.Put(&manifest); err != nil { + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.Tag)) + } + _, err = manifests.Put(imh, manifest, options...) + if err != nil { // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. if err == distribution.ErrUnsupported { @@ -192,8 +290,25 @@ func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http return } + // Tag this manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + err = tags.Tag(imh, imh.Tag, desc) + if err != nil { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + } + // Construct a canonical url for the uploaded manifest. - location, err := imh.urlBuilder.BuildManifestURL(imh.Repository.Name(), imh.Digest.String()) + ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest) + if err != nil { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + return + } + + location, err := imh.urlBuilder.BuildManifestURL(ref) if err != nil { // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to // happen. We'll log the error here but proceed as if it worked. Worst @@ -216,7 +331,7 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h return } - err = manifests.Delete(imh.Digest) + err = manifests.Delete(imh, imh.Digest) if err != nil { switch err { case digest.ErrDigestUnsupported: @@ -235,30 +350,19 @@ func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *h } } - w.WriteHeader(http.StatusAccepted) -} - -// digestManifest takes a digest of the given manifest. This belongs somewhere -// better but we'll wait for a refactoring cycle to find that real somewhere.
-func digestManifest(ctx context.Context, sm *schema1.SignedManifest) (digest.Digest, error) { - p, err := sm.Payload() + tagService := imh.Repository.Tags(imh) + referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest}) if err != nil { - if !strings.Contains(err.Error(), "missing signature key") { - ctxu.GetLogger(ctx).Errorf("error getting manifest payload: %v", err) - return "", err - } - - // NOTE(stevvooe): There are no signatures but we still have a - // payload. The request will fail later but this is not the - // responsibility of this part of the code. - p = sm.Raw + imh.Errors = append(imh.Errors, err) + return } - dgst, err := digest.FromBytes(p) - if err != nil { - ctxu.GetLogger(ctx).Errorf("error digesting manifest: %v", err) - return "", err + for _, tag := range referencedTags { + if err := tagService.Untag(imh, tag); err != nil { + imh.Errors = append(imh.Errors, err) + return + } } - return dgst, err + w.WriteHeader(http.StatusAccepted) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go index 547255857cc3..fd661e663a5d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/handlers/tags.go @@ -34,17 +34,13 @@ type tagsAPIResponse struct { // GetTags returns a json list of tags for a specific image name. func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - manifests, err := th.Repository.Manifests(th) - if err != nil { - th.Errors = append(th.Errors, err) - return - } - tags, err := manifests.Tags() + tagService := th.Repository.Tags(th) + tags, err := tagService.All(th) if err != nil { switch err := err.(type) { case distribution.ErrRepositoryUnknown: - th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name()})) + th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()})) default: th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } @@ -55,7 +51,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { enc := json.NewEncoder(w) if err := enc.Encode(tagsAPIResponse{ - Name: th.Repository.Name(), + Name: th.Repository.Named().Name(), Tags: tags, }); err != nil { th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go deleted file mode 100644 index b93a7a63ffcc..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/listener/listener.go +++ /dev/null @@ -1,74 +0,0 @@ -package listener - -import ( - "fmt" - "net" - "os" - "time" -) - -// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted -// connections. It's used by ListenAndServe and ListenAndServeTLS so -// dead TCP connections (e.g. closing laptop mid-download) eventually -// go away. 
-// it is a plain copy-paste from net/http/server.go -type tcpKeepAliveListener struct { - *net.TCPListener -} - -func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil -} - -// NewListener announces on laddr and net. Accepted values of the net are -// 'unix' and 'tcp' -func NewListener(net, laddr string) (net.Listener, error) { - switch net { - case "unix": - return newUnixListener(laddr) - case "tcp", "": // an empty net means tcp - return newTCPListener(laddr) - default: - return nil, fmt.Errorf("unknown address type %s", net) - } -} - -func newUnixListener(laddr string) (net.Listener, error) { - fi, err := os.Stat(laddr) - if err == nil { - // the file exists. - // try to remove it if it's a socket - if !isSocket(fi.Mode()) { - return nil, fmt.Errorf("file %s exists and is not a socket", laddr) - } - - if err := os.Remove(laddr); err != nil { - return nil, err - } - } else if !os.IsNotExist(err) { - // we can't do stat on the file. - // it means we can not remove it - return nil, err - } - - return net.Listen("unix", laddr) -} - -func isSocket(m os.FileMode) bool { - return m&os.ModeSocket != 0 -} - -func newTCPListener(laddr string) (net.Listener, error) { - ln, err := net.Listen("tcp", laddr) - if err != nil { - return nil, err - } - - return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go index 7535c6db5ee4..3e6e5cc71a92 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/middleware/registry/middleware.go @@ -5,6 +5,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/storage" ) // InitFunc is the type of a RegistryMiddleware factory function and is @@ -12,6 +13,7 @@ import ( type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) var middlewares map[string]InitFunc +var registryoptions []storage.RegistryOption // Register is used to register an InitFunc for // a RegistryMiddleware backend with the given name. @@ -38,3 +40,15 @@ func Get(ctx context.Context, name string, options map[string]interface{}, regis return nil, fmt.Errorf("no registry middleware registered with name: %s", name) } + +// RegisterOptions adds more options to RegistryOption list. Options get applied before +// any other configuration-based options. +func RegisterOptions(options ...storage.RegistryOption) error { + registryoptions = append(registryoptions, options...) + return nil +} + +// GetRegistryOptions returns list of RegistryOption. 
+func GetRegistryOptions() []storage.RegistryOption { + return registryoptions +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go index e4bec75a5edc..a9cc43a61577 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyauth.go @@ -8,6 +8,7 @@ import ( ) const tokenURL = "https://auth.docker.io/token" +const challengeHeader = "Docker-Distribution-Api-Version" type userpass struct { username string @@ -24,12 +25,15 @@ func (c credentials) Basic(u *url.URL) (string, string) { return up.username, up.password } -// ConfigureAuth authorizes with the upstream registry -func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) { - if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil { - return nil, err - } +func (c credentials) RefreshToken(u *url.URL, service string) string { + return "" +} + +func (c credentials) SetRefreshToken(u *url.URL, service, token string) { +} +// configureAuth stores credentials for challenge responses +func configureAuth(username, password string) (auth.CredentialStore, error) { creds := map[string]userpass{ tokenURL: { username: username, diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go index 741e2ad074ff..7a6d7ea27e43 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" ) @@ -17,9 +18,11 @@ import ( const blobTTL = time.Duration(24 * 7 * time.Hour) type proxyBlobStore struct { - localStore distribution.BlobStore - remoteStore distribution.BlobService - scheduler *scheduler.TTLExpirationScheduler + localStore distribution.BlobStore + remoteStore distribution.BlobService + scheduler *scheduler.TTLExpirationScheduler + repositoryName reference.Named + authChallenger authChallenger } var _ distribution.BlobStore = &proxyBlobStore{} @@ -119,6 +122,10 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, return nil } + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return err + } + mu.Lock() _, ok := inflight[dgst] if ok { @@ -133,7 +140,14 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, if err := pbs.storeLocal(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) } - pbs.scheduler.AddBlob(dgst.String(), repositoryTTL) + + blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return + } + + pbs.scheduler.AddBlob(blobRef, repositoryTTL) }(dgst) _, err = pbs.copyContent(ctx, dgst, w) @@ -153,15 +167,41 @@ func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distri return distribution.Descriptor{}, err } + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return 
distribution.Descriptor{}, err + } + return pbs.remoteStore.Stat(ctx, dgst) } +func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + blob, err := pbs.localStore.Get(ctx, dgst) + if err == nil { + return blob, nil + } + + if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil { + return []byte{}, err + } + + blob, err = pbs.remoteStore.Get(ctx, dgst) + if err != nil { + return []byte{}, err + } + + _, err = pbs.localStore.Put(ctx, "", blob) + if err != nil { + return []byte{}, err + } + return blob, nil +} + // Unsupported functions func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { return distribution.Descriptor{}, distribution.ErrUnsupported } -func (pbs *proxyBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { +func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { return nil, distribution.ErrUnsupported } @@ -169,18 +209,14 @@ func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution. return nil, distribution.ErrUnsupported } -func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - return nil, distribution.ErrUnsupported +func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, distribution.ErrUnsupported } -func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { +func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { return nil, distribution.ErrUnsupported } -func (pbs *proxyBlobStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { - return distribution.ErrUnsupported -} - func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go index ee8acca53e99..967dcd3d2154 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyblobstore_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" @@ -42,17 +43,12 @@ func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, return sbs.blobs.Get(ctx, dgst) } -func (sbs statsBlobStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { - sbs.stats["enumerate"]++ - return sbs.blobs.Enumerate(ctx, ingester) -} - -func (sbs statsBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { +func (sbs statsBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { sbsMu.Lock() sbs.stats["create"]++ sbsMu.Unlock() - return sbs.blobs.Create(ctx) + return sbs.blobs.Create(ctx, options...) 
} func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -119,6 +115,11 @@ func (te *testEnv) RemoteStats() *map[string]int { // Populate remote store and record the digests func makeTestEnv(t *testing.T, name string) *testEnv { + nameRef, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + ctx := context.Background() truthDir, err := ioutil.TempDir("", "truth") @@ -131,21 +132,35 @@ func makeTestEnv(t *testing.T, name string) *testEnv { t.Fatalf("unable to create tempdir: %s", err) } + localDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": truthDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + // todo: create a tempfile area here - localRegistry, err := storage.NewRegistry(ctx, filesystem.New(truthDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) + localRegistry, err := storage.NewRegistry(ctx, localDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) if err != nil { t.Fatalf("error creating registry: %v", err) } - localRepo, err := localRegistry.Repository(ctx, name) + localRepo, err := localRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - truthRegistry, err := storage.NewRegistry(ctx, filesystem.New(cacheDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) + cacheDriver, err := filesystem.FromParameters(map[string]interface{}{ + "rootdirectory": cacheDir, + }) + if err != nil { + t.Fatalf("unable to create filesystem driver: %s", err) + } + + truthRegistry, err := storage.NewRegistry(ctx, cacheDriver, storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) if err != nil { t.Fatalf("error creating registry: %v", err) } - truthRepo, err := truthRegistry.Repository(ctx, name) + truthRepo, err := truthRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } @@ -163,9 +178,11 @@ func makeTestEnv(t *testing.T, name string) *testEnv { s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") proxyBlobStore := proxyBlobStore{ - remoteStore: truthBlobs, - localStore: localBlobs, - scheduler: s, + repositoryName: nameRef, + remoteStore: truthBlobs, + localStore: localBlobs, + scheduler: s, + authChallenger: &mockChallenger{}, } te := &testEnv{ @@ -215,6 +232,40 @@ func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { te.inRemote = inRemote te.numUnique = numUnique } +func TestProxyStoreGet(t *testing.T) { + te := makeTestEnv(t, "foo/bar") + + localStats := te.LocalStats() + remoteStats := te.RemoteStats() + + populate(t, te, 1, 10, 1) + _, err := te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 1 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + + _, err = te.store.Get(te.ctx, te.inRemote[0].Digest) + if err != nil { + t.Fatal(err) + } + + if (*localStats)["get"] != 2 && (*localStats)["put"] != 1 { + t.Errorf("Unexpected local counts") + } + + if (*remoteStats)["get"] != 1 { + t.Errorf("Unexpected remote get count") + } + +} func TestProxyStoreStat(t *testing.T) { 
te := makeTestEnv(t, "foo/bar") @@ -240,6 +291,11 @@ func TestProxyStoreStat(t *testing.T) { if (*remoteStats)["stat"] != remoteBlobCount { t.Errorf("Unexpected remote stat count") } + + if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) { + t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger) + } + } func TestProxyStoreServeHighConcurrency(t *testing.T) { @@ -303,10 +359,7 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { } bodyBytes := w.Body.Bytes() - localDigest, err := digest.FromBytes(bodyBytes) - if err != nil { - t.Fatalf("Error making digest from blob") - } + localDigest := digest.FromBytes(bodyBytes) if localDigest != remoteBlob.Digest { t.Fatalf("Mismatching blob fetch from proxy") } @@ -340,10 +393,7 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { t.Fatalf(err.Error()) } - dl, err := digest.FromBytes(w.Body.Bytes()) - if err != nil { - t.Fatalf("Error making digest from blob") - } + dl := digest.FromBytes(w.Body.Bytes()) if dl != dr.Digest { t.Errorf("Mismatching blob fetch from proxy") } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go index 5e14e8a0f7e2..f08e285db670 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore.go @@ -6,8 +6,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" ) @@ -18,141 +17,79 @@ type proxyManifestStore struct { ctx context.Context localManifests distribution.ManifestService remoteManifests distribution.ManifestService - repositoryName string + repositoryName reference.Named scheduler *scheduler.TTLExpirationScheduler + authChallenger authChallenger } var _ distribution.ManifestService = &proxyManifestStore{} -func (pms proxyManifestStore) Exists(dgst digest.Digest) (bool, error) { - exists, err := pms.localManifests.Exists(dgst) +func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + exists, err := pms.localManifests.Exists(ctx, dgst) if err != nil { return false, err } if exists { return true, nil } - - return pms.remoteManifests.Exists(dgst) -} - -func (pms proxyManifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { - sm, err := pms.localManifests.Get(dgst) - if err == nil { - proxyMetrics.ManifestPush(uint64(len(sm.Raw))) - return sm, err - } - - sm, err = pms.remoteManifests.Get(dgst) - if err != nil { - return nil, err - } - - proxyMetrics.ManifestPull(uint64(len(sm.Raw))) - err = pms.localManifests.Put(sm) - if err != nil { - return nil, err - } - - // Schedule the repo for removal - pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) - - // Ensure the manifest blob is cleaned up - pms.scheduler.AddBlob(dgst.String(), repositoryTTL) - - proxyMetrics.ManifestPush(uint64(len(sm.Raw))) - - return sm, err -} - -func (pms proxyManifestStore) Tags() ([]string, error) { - return pms.localManifests.Tags() -} - -func (pms proxyManifestStore) ExistsByTag(tag string) (bool, error) { - exists, err := pms.localManifests.ExistsByTag(tag) 
- if err != nil { + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { return false, err } - if exists { - return true, nil - } - - return pms.remoteManifests.ExistsByTag(tag) + return pms.remoteManifests.Exists(ctx, dgst) } -func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - var localDigest digest.Digest - - localManifest, err := pms.localManifests.GetByTag(tag, options...) - switch err.(type) { - case distribution.ErrManifestUnknown, distribution.ErrManifestUnknownRevision: - goto fromremote - case nil: - break - default: - return nil, err - } - - localDigest, err = manifestDigest(localManifest) +func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + // At this point `dgst` was either specified explicitly, or returned by the + // tagstore with the most recent association. + var fromRemote bool + manifest, err := pms.localManifests.Get(ctx, dgst, options...) if err != nil { - return nil, err - } - -fromremote: - var sm *schema1.SignedManifest - sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String())) - if err != nil && err != distribution.ErrManifestNotModified { - return nil, err - } - - if err == distribution.ErrManifestNotModified { - context.GetLogger(pms.ctx).Debugf("Local manifest for %q is latest, dgst=%s", tag, localDigest.String()) - return localManifest, nil + if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil { + return nil, err + } + + manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) + if err != nil { + return nil, err + } + fromRemote = true } - context.GetLogger(pms.ctx).Debugf("Updated manifest for %q, dgst=%s", tag, localDigest.String()) - err = pms.localManifests.Put(sm) + _, payload, err := manifest.Payload() if err != nil { return nil, err } - dgst, err := manifestDigest(sm) - if err != nil { - return nil, err - } - pms.scheduler.AddBlob(dgst.String(), repositoryTTL) - pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL) + proxyMetrics.ManifestPush(uint64(len(payload))) + if fromRemote { + proxyMetrics.ManifestPull(uint64(len(payload))) - proxyMetrics.ManifestPull(uint64(len(sm.Raw))) - proxyMetrics.ManifestPush(uint64(len(sm.Raw))) + _, err = pms.localManifests.Put(ctx, manifest) + if err != nil { + return nil, err + } - return sm, err -} + // Schedule the manifest blob for removal + repoBlob, err := reference.WithDigest(pms.repositoryName, dgst) + if err != nil { + context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + return nil, err + } -func manifestDigest(sm *schema1.SignedManifest) (digest.Digest, error) { - payload, err := sm.Payload() - if err != nil { - return "", err + pms.scheduler.AddManifest(repoBlob, repositoryTTL) + // Ensure the manifest blob is cleaned up + //pms.scheduler.AddBlob(blobRef, repositoryTTL) } - dgst, err := digest.FromBytes(payload) - if err != nil { - return "", err - } - - return dgst, nil + return manifest, err } -func (pms proxyManifestStore) Put(manifest *schema1.SignedManifest) error { - return distribution.ErrUnsupported +func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var d digest.Digest + return d, distribution.ErrUnsupported } -func (pms proxyManifestStore) Delete(dgst digest.Digest) error { +func (pms proxyManifestStore) 
Delete(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } - -func (pms proxyManifestStore) Enumerate() ([]digest.Digest, error) { - return nil, distribution.ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go index 1c6d7527d9cd..1069d66c8d55 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxymanifeststore_test.go @@ -2,6 +2,7 @@ package proxy import ( "io" + "sync" "testing" "github.com/docker/distribution" @@ -9,6 +10,8 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" @@ -37,53 +40,66 @@ func (te manifestStoreTestEnv) RemoteStats() *map[string]int { return &rs } -func (sm statsManifest) Delete(dgst digest.Digest) error { +func (sm statsManifest) Delete(ctx context.Context, dgst digest.Digest) error { sm.stats["delete"]++ - return sm.manifests.Delete(dgst) + return sm.manifests.Delete(ctx, dgst) } -func (sm statsManifest) Exists(dgst digest.Digest) (bool, error) { +func (sm statsManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { sm.stats["exists"]++ - return sm.manifests.Exists(dgst) + return sm.manifests.Exists(ctx, dgst) } -func (sm statsManifest) ExistsByTag(tag string) (bool, error) { - sm.stats["existbytag"]++ - return sm.manifests.ExistsByTag(tag) -} - -func (sm statsManifest) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { +func (sm statsManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { sm.stats["get"]++ - return sm.manifests.Get(dgst) + return sm.manifests.Get(ctx, dgst) } -func (sm statsManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - sm.stats["getbytag"]++ - return sm.manifests.GetByTag(tag, options...) 
+func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + sm.stats["put"]++ + return sm.manifests.Put(ctx, manifest) } -func (sm statsManifest) Enumerate() ([]digest.Digest, error) { +/*func (sm statsManifest) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { sm.stats["enumerate"]++ - return sm.manifests.Enumerate() + return sm.manifests.Enumerate(ctx, manifests, last) } +*/ -func (sm statsManifest) Put(manifest *schema1.SignedManifest) error { - sm.stats["put"]++ - return sm.manifests.Put(manifest) +type mockChallenger struct { + sync.Mutex + count int +} + +// Called for remote operations only +func (m *mockChallenger) tryEstablishChallenges(context.Context) error { + m.Lock() + defer m.Unlock() + + m.count++ + return nil } -func (sm statsManifest) Tags() ([]string, error) { - sm.stats["tags"]++ - return sm.manifests.Tags() +func (m *mockChallenger) credentialStore() auth.CredentialStore { + return nil +} + +func (m *mockChallenger) challengeManager() auth.ChallengeManager { + return nil } func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { + nameRef, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("unable to parse reference: %s", err) + } + ctx := context.Background() truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) if err != nil { t.Fatalf("error creating registry: %v", err) } - truthRepo, err := truthRegistry.Repository(ctx, name) + truthRepo, err := truthRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } @@ -105,7 +121,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE if err != nil { t.Fatalf("error creating registry: %v", err) } - localRepo, err := localRegistry.Repository(ctx, name) + localRepo, err := localRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } @@ -127,6 +143,8 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE localManifests: localManifests, remoteManifests: truthManifests, scheduler: s, + repositoryName: nameRef, + authChallenger: &mockChallenger{}, }, } } @@ -174,15 +192,12 @@ func populateRepo(t *testing.T, ctx context.Context, repository distribution.Rep if err != nil { t.Fatalf(err.Error()) } - ms.Put(sm) + dgst, err := ms.Put(ctx, sm) if err != nil { t.Fatalf("unexpected errors putting manifest: %v", err) } - pl, err := sm.Payload() - if err != nil { - t.Fatal(err) - } - return digest.FromBytes(pl) + + return dgst, nil } // TestProxyManifests contains basic acceptance tests @@ -194,24 +209,30 @@ func TestProxyManifests(t *testing.T) { localStats := env.LocalStats() remoteStats := env.RemoteStats() + ctx := context.Background() // Stat - must check local and remote - exists, err := env.manifests.ExistsByTag("latest") + exists, err := env.manifests.Exists(ctx, env.manifestDigest) if err != nil { - t.Fatalf("Error checking existance") + t.Fatalf("Error checking existence") } if !exists { t.Errorf("Unexpected non-existant manifest") } - if (*localStats)["existbytag"] != 1 && (*remoteStats)["existbytag"] != 1 { - t.Errorf("Unexpected exists count") + if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 { + t.Errorf("Unexpected exists count : \n%v \n%v", localStats, remoteStats) + 
} + + if env.manifests.authChallenger.(*mockChallenger).count != 1 { + t.Fatalf("Expected 1 auth challenge, got %#v", env.manifests.authChallenger) } // Get - should succeed and pull manifest into local - _, err = env.manifests.Get(env.manifestDigest) + _, err = env.manifests.Get(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } + if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 { t.Errorf("Unexpected get count") } @@ -220,8 +241,12 @@ func TestProxyManifests(t *testing.T) { t.Errorf("Expected local put") } + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) + } + // Stat - should only go to local - exists, err = env.manifests.ExistsByTag("latest") + exists, err = env.manifests.Exists(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } @@ -229,19 +254,22 @@ func TestProxyManifests(t *testing.T) { t.Errorf("Unexpected non-existant manifest") } - if (*localStats)["existbytag"] != 2 && (*remoteStats)["existbytag"] != 1 { + if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 { t.Errorf("Unexpected exists count") + } + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) } - // Get - should get from remote, to test freshness - _, err = env.manifests.Get(env.manifestDigest) + // Get proxied - won't require another authchallenge + _, err = env.manifests.Get(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } - if (*remoteStats)["get"] != 2 && (*remoteStats)["existsbytag"] != 1 && (*localStats)["put"] != 1 { - t.Errorf("Unexpected get count") + if env.manifests.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go index af1e7c9e947f..56818dabca4c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxyregistry.go @@ -1,12 +1,15 @@ package proxy import ( + "fmt" "net/http" "net/url" + "sync" "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" @@ -17,48 +20,91 @@ import ( // proxyingRegistry fetches content from a remote registry and caches it locally type proxyingRegistry struct { - embedded distribution.Namespace // provides local registry functionality - - scheduler *scheduler.TTLExpirationScheduler - - remoteURL string - credentialStore auth.CredentialStore - challengeManager auth.ChallengeManager + embedded distribution.Namespace // provides local registry functionality + scheduler *scheduler.TTLExpirationScheduler + remoteURL url.URL + authChallenger authChallenger } // NewRegistryPullThroughCache creates a registry acting as a pull through cache func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { - _, err := url.Parse(config.RemoteURL) + remoteURL, err := url.Parse(config.RemoteURL) if err != nil { return 
nil, err } v := storage.NewVacuum(ctx, driver) - s := scheduler.New(ctx, driver, "/scheduler-state.json") - s.OnBlobExpire(func(digest string) error { - return v.RemoveBlob(digest) + s.OnBlobExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + blobs := repo.Blobs(ctx) + + // Clear the repository reference and descriptor caches + err = blobs.Delete(ctx, r.Digest()) + if err != nil { + return err + } + + err = v.RemoveBlob(r.Digest().String()) + if err != nil { + return err + } + + return nil }) - s.OnManifestExpire(func(repoName string) error { - return v.RemoveRepository(repoName) + + s.OnManifestExpire(func(ref reference.Reference) error { + var r reference.Canonical + var ok bool + if r, ok = ref.(reference.Canonical); !ok { + return fmt.Errorf("unexpected reference type : %T", ref) + } + + repo, err := registry.Repository(ctx, r) + if err != nil { + return err + } + + manifests, err := repo.Manifests(ctx) + if err != nil { + return err + } + err = manifests.Delete(ctx, r.Digest()) + if err != nil { + return err + } + return nil }) + err = s.Start() if err != nil { return nil, err } - challengeManager := auth.NewSimpleChallengeManager() - cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager) + cs, err := configureAuth(config.Username, config.Password) if err != nil { return nil, err } return &proxyingRegistry{ - embedded: registry, - scheduler: s, - challengeManager: challengeManager, - credentialStore: cs, - remoteURL: config.RemoteURL, + embedded: registry, + scheduler: s, + remoteURL: *remoteURL, + authChallenger: &remoteAuthChallenger{ + remoteURL: *remoteURL, + cm: auth.NewSimpleChallengeManager(), + cs: cs, + }, }, nil } @@ -70,20 +116,22 @@ func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, la return pr.embedded.Repositories(ctx, repos, last) } -func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distribution.Repository, error) { +func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { + c := pr.authChallenger + tr := transport.NewTransport(http.DefaultTransport, - auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name, "pull"))) + auth.NewAuthorizer(c.challengeManager(), auth.NewTokenHandler(http.DefaultTransport, c.credentialStore(), name.Name(), "pull"))) localRepo, err := pr.embedded.Repository(ctx, name) if err != nil { return nil, err } - localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification) + localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) if err != nil { return nil, err } - remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr) + remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL.String(), tr) if err != nil { return nil, err } @@ -95,39 +143,95 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name string) (distri return &proxiedRepository{ blobStore: &proxyBlobStore{ - localStore: localRepo.Blobs(ctx), - remoteStore: remoteRepo.Blobs(ctx), - scheduler: pr.scheduler, + localStore: localRepo.Blobs(ctx), + remoteStore: remoteRepo.Blobs(ctx), + scheduler: pr.scheduler, + repositoryName: name, + authChallenger: pr.authChallenger, }, - manifests: 
proxyManifestStore{ + manifests: &proxyManifestStore{ repositoryName: name, localManifests: localManifests, // Options? remoteManifests: remoteManifests, ctx: ctx, scheduler: pr.scheduler, + authChallenger: pr.authChallenger, + }, + name: name, + tags: &proxyTagService{ + localTags: localRepo.Tags(ctx), + remoteTags: remoteRepo.Tags(ctx), + authChallenger: pr.authChallenger, }, - name: name, - signatures: localRepo.Signatures(), }, nil } -// Blobs returns a blob service for local blob store. -func (pr *proxyingRegistry) Blobs() distribution.BlobService { +func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator { return pr.embedded.Blobs() } +func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter { + return pr.embedded.BlobStatter() +} + +// authChallenger encapsulates a request to the upstream to establish credential challenges +type authChallenger interface { + tryEstablishChallenges(context.Context) error + challengeManager() auth.ChallengeManager + credentialStore() auth.CredentialStore +} + +type remoteAuthChallenger struct { + remoteURL url.URL + sync.Mutex + cm auth.ChallengeManager + cs auth.CredentialStore +} + +func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore { + return r.cs +} + +func (r *remoteAuthChallenger) challengeManager() auth.ChallengeManager { + return r.cm +} + +// tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist +func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error { + r.Lock() + defer r.Unlock() + + remoteURL := r.remoteURL + remoteURL.Path = "/v2/" + challenges, err := r.cm.GetChallenges(r.remoteURL) + if err != nil { + return err + } + + if len(challenges) > 0 { + return nil + } + + // establish challenge type with upstream + if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil { + return err + } + + context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) + return nil +} + // proxiedRepository uses proxying blob and manifest services to serve content // locally, or pulling it through from a remote and caching it locally if it doesn't // already exist type proxiedRepository struct { - blobStore distribution.BlobStore - manifests distribution.ManifestService - name string - signatures distribution.SignatureService + blobStore distribution.BlobStore + manifests distribution.ManifestService + name reference.Named + tags distribution.TagService } func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // options return pr.manifests, nil } @@ -135,10 +239,10 @@ func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { return pr.blobStore } -func (pr *proxiedRepository) Name() string { +func (pr *proxiedRepository) Named() reference.Named { return pr.name } -func (pr *proxiedRepository) Signatures() distribution.SignatureService { - return pr.signatures +func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { + return pr.tags } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go new file mode 100644 index 000000000000..a8273030df6c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice.go @@ -0,0 +1,65 @@ +package proxy + +import ( + "github.com/docker/distribution" + 
"github.com/docker/distribution/context" +) + +// proxyTagService supports local and remote lookup of tags. +type proxyTagService struct { + localTags distribution.TagService + remoteTags distribution.TagService + authChallenger authChallenger +} + +var _ distribution.TagService = proxyTagService{} + +// Get attempts to get the most recent digest for the tag by checking the remote +// tag service first and then caching it locally. If the remote is unavailable +// the local association is returned +func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + err := pt.authChallenger.tryEstablishChallenges(ctx) + if err == nil { + desc, err := pt.remoteTags.Get(ctx, tag) + if err == nil { + err := pt.localTags.Tag(ctx, tag, desc) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + } + + desc, err := pt.localTags.Get(ctx, tag) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil +} + +func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + return distribution.ErrUnsupported +} + +func (pt proxyTagService) Untag(ctx context.Context, tag string) error { + err := pt.localTags.Untag(ctx, tag) + if err != nil { + return err + } + return nil +} + +func (pt proxyTagService) All(ctx context.Context) ([]string, error) { + err := pt.authChallenger.tryEstablishChallenges(ctx) + if err == nil { + tags, err := pt.remoteTags.All(ctx) + if err == nil { + return tags, err + } + } + return pt.localTags.All(ctx) +} + +func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return []string{}, distribution.ErrUnsupported +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice_test.go new file mode 100644 index 000000000000..ce0fe78ba96a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/proxytagservice_test.go @@ -0,0 +1,182 @@ +package proxy + +import ( + "reflect" + "sort" + "sync" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type mockTagStore struct { + mapping map[string]distribution.Descriptor + sync.Mutex +} + +var _ distribution.TagService = &mockTagStore{} + +func (m *mockTagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + m.Lock() + defer m.Unlock() + + if d, ok := m.mapping[tag]; ok { + return d, nil + } + return distribution.Descriptor{}, distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + m.Lock() + defer m.Unlock() + + m.mapping[tag] = desc + return nil +} + +func (m *mockTagStore) Untag(ctx context.Context, tag string) error { + m.Lock() + defer m.Unlock() + + if _, ok := m.mapping[tag]; ok { + delete(m.mapping, tag) + return nil + } + return distribution.ErrTagUnknown{} +} + +func (m *mockTagStore) All(ctx context.Context) ([]string, error) { + m.Lock() + defer m.Unlock() + + var tags []string + for tag := range m.mapping { + tags = append(tags, tag) + } + + return tags, nil +} + +func (m *mockTagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func testProxyTagService(local, remote map[string]distribution.Descriptor) *proxyTagService { + if local == nil { + local = 
make(map[string]distribution.Descriptor) + } + if remote == nil { + remote = make(map[string]distribution.Descriptor) + } + return &proxyTagService{ + localTags: &mockTagStore{mapping: local}, + remoteTags: &mockTagStore{mapping: remote}, + authChallenger: &mockChallenger{}, + } +} + +func TestGet(t *testing.T) { + remoteDesc := distribution.Descriptor{Size: 42} + remoteTag := "remote" + proxyTags := testProxyTagService(map[string]distribution.Descriptor{remoteTag: remoteDesc}, nil) + + ctx := context.Background() + + // Get pre-loaded tag + d, err := proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 1 { + t.Fatalf("Expected 1 auth challenge call, got %#v", proxyTags.authChallenger) + } + + if !reflect.DeepEqual(d, remoteDesc) { + t.Fatal("unable to get put tag") + } + + local, err := proxyTags.localTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("remote tag not pulled into store") + } + + if !reflect.DeepEqual(local, remoteDesc) { + t.Fatalf("unexpected descriptor pulled through") + } + + // Manually overwrite remote tag + newRemoteDesc := distribution.Descriptor{Size: 43} + err = proxyTags.remoteTags.Tag(ctx, remoteTag, newRemoteDesc) + if err != nil { + t.Fatal(err) + } + + d, err = proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 2 { + t.Fatalf("Expected 2 auth challenge calls, got %#v", proxyTags.authChallenger) + } + + if !reflect.DeepEqual(d, newRemoteDesc) { + t.Fatal("unable to get put tag") + } + + _, err = proxyTags.localTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("remote tag not pulled into store") + } + + // untag, ensure it's removed locally, but present in remote + err = proxyTags.Untag(ctx, remoteTag) + if err != nil { + t.Fatal(err) + } + + _, err = proxyTags.localTags.Get(ctx, remoteTag) + if err == nil { + t.Fatalf("Expected error getting Untag'd tag") + } + + _, err = proxyTags.remoteTags.Get(ctx, remoteTag) + if err != nil { + t.Fatalf("remote tag should not be untagged with proxyTag.Untag") + } + + _, err = proxyTags.Get(ctx, remoteTag) + if err != nil { + t.Fatal("untagged tag should be pulled through") + } + + if proxyTags.authChallenger.(*mockChallenger).count != 3 { + t.Fatalf("Expected 3 auth challenge calls, got %#v", proxyTags.authChallenger) + } + + // Add another tag. 
Ensure both tags appear in 'All' + err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42}) + if err != nil { + t.Fatal(err) + } + + all, err := proxyTags.All(ctx) + if err != nil { + t.Fatal(err) + } + + if len(all) != 2 { + t.Fatalf("Unexpected tag length returned from All() : %d ", len(all)) + } + + sort.Strings(all) + if all[0] != "funtag" && all[1] != "remote" { + t.Fatalf("Unexpected tags returned from All() : %v ", all) + } + + if proxyTags.authChallenger.(*mockChallenger).count != 4 { + t.Fatalf("Expected 4 auth challenge calls, got %#v", proxyTags.authChallenger) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go index e91920a1dd73..0c8a85348002 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler.go @@ -7,11 +7,12 @@ import ( "time" "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" ) // onTTLExpiryFunc is called when a repository's TTL expires -type expiryFunc func(string) error +type expiryFunc func(reference.Reference) error const ( entryTypeBlob = iota @@ -80,19 +81,20 @@ func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { } // AddBlob schedules a blob cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddBlob(dgst string, ttl time.Duration) error { +func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error { ttles.Lock() defer ttles.Unlock() if ttles.stopped { return fmt.Errorf("scheduler not started") } - ttles.add(dgst, ttl, entryTypeBlob) + + ttles.add(blobRef, ttl, entryTypeBlob) return nil } // AddManifest schedules a manifest cleanup after ttl expires -func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Duration) error { +func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error { ttles.Lock() defer ttles.Unlock() @@ -100,7 +102,7 @@ func (ttles *TTLExpirationScheduler) AddManifest(repoName string, ttl time.Durat return fmt.Errorf("scheduler not started") } - ttles.add(repoName, ttl, entryTypeManifest) + ttles.add(manifestRef, ttl, entryTypeManifest) return nil } @@ -154,17 +156,17 @@ func (ttles *TTLExpirationScheduler) Start() error { return nil } -func (ttles *TTLExpirationScheduler) add(key string, ttl time.Duration, eType int) { +func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) { entry := &schedulerEntry{ - Key: key, + Key: r.String(), Expiry: time.Now().Add(ttl), EntryType: eType, } context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) - if oldEntry, present := ttles.entries[key]; present && oldEntry.timer != nil { + if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { oldEntry.timer.Stop() } - ttles.entries[key] = entry + ttles.entries[entry.Key] = entry entry.timer = ttles.startTimer(entry, ttl) ttles.indexDirty = true } @@ -182,13 +184,18 @@ func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time. 
case entryTypeManifest: f = ttles.onManifestExpire default: - f = func(repoName string) error { - return fmt.Errorf("Unexpected scheduler entry type") + f = func(reference.Reference) error { + return fmt.Errorf("scheduler entry type") } } - if err := f(entry.Key); err != nil { - context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + ref, err := reference.Parse(entry.Key) + if err == nil { + if err := f(ref); err != nil { + context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + } + } else { + context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) } delete(ttles.entries, entry.Key) @@ -247,6 +254,5 @@ func (ttles *TTLExpirationScheduler) readState() error { if err != nil { return err } - return nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go index 00072ed2c946..556f52045bde 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/proxy/scheduler/scheduler_test.go @@ -6,28 +6,49 @@ import ( "time" "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver/inmemory" ) +func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) { + ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + + return ref1, ref2, ref3 +} + func TestSchedule(t *testing.T) { + ref1, ref2, ref3 := testRefs(t) timeUnit := time.Millisecond remainingRepos := map[string]bool{ - "testBlob1": true, - "testBlob2": true, - "ch00": true, + ref1.String(): true, + ref2.String(): true, + ref3.String(): true, } s := New(context.Background(), inmemory.New(), "/ttl") - deleteFunc := func(repoName string) error { + deleteFunc := func(repoName reference.Reference) error { if len(remainingRepos) == 0 { t.Fatalf("Incorrect expiry count") } - _, ok := remainingRepos[repoName] + _, ok := remainingRepos[repoName.String()] if !ok { - t.Fatalf("Trying to remove nonexistant repo: %s", repoName) + t.Fatalf("Trying to remove nonexistent repo: %s", repoName) } t.Log("removing", repoName) - delete(remainingRepos, repoName) + delete(remainingRepos, repoName.String()) return nil } @@ -37,11 +58,11 @@ func TestSchedule(t *testing.T) { t.Fatalf("Error starting ttlExpirationScheduler: %s", err) } - s.add("testBlob1", 3*timeUnit, entryTypeBlob) - s.add("testBlob2", 1*timeUnit, entryTypeBlob) + s.add(ref1, 3*timeUnit, entryTypeBlob) + s.add(ref2, 1*timeUnit, entryTypeBlob) func() { - s.add("ch00", 1*timeUnit, entryTypeBlob) + s.add(ref3, 1*timeUnit, entryTypeBlob) }() @@ -53,33 +74,34 @@ func TestSchedule(t *testing.T) { } func TestRestoreOld(t *testing.T) { + ref1, ref2, _ := testRefs(t) remainingRepos := map[string]bool{ - "testBlob1": true, - 
"oldRepo": true, + ref1.String(): true, + ref2.String(): true, } - deleteFunc := func(repoName string) error { - if repoName == "oldRepo" && len(remainingRepos) == 3 { - t.Errorf("oldRepo should be removed first") + deleteFunc := func(r reference.Reference) error { + if r.String() == ref1.String() && len(remainingRepos) == 2 { + t.Errorf("ref1 should be removed first") } - _, ok := remainingRepos[repoName] + _, ok := remainingRepos[r.String()] if !ok { - t.Fatalf("Trying to remove nonexistant repo: %s", repoName) + t.Fatalf("Trying to remove nonexistent repo: %s", r) } - delete(remainingRepos, repoName) + delete(remainingRepos, r.String()) return nil } timeUnit := time.Millisecond serialized, err := json.Marshal(&map[string]schedulerEntry{ - "testBlob1": { + ref1.String(): { Expiry: time.Now().Add(1 * timeUnit), - Key: "testBlob1", + Key: ref1.String(), EntryType: 0, }, - "oldRepo": { + ref2.String(): { Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first - Key: "oldRepo", + Key: ref2.String(), EntryType: 0, }, }) @@ -108,13 +130,16 @@ func TestRestoreOld(t *testing.T) { } func TestStopRestore(t *testing.T) { + ref1, ref2, _ := testRefs(t) + timeUnit := time.Millisecond remainingRepos := map[string]bool{ - "testBlob1": true, - "testBlob2": true, + ref1.String(): true, + ref2.String(): true, } - deleteFunc := func(repoName string) error { - delete(remainingRepos, repoName) + + deleteFunc := func(r reference.Reference) error { + delete(remainingRepos, r.String()) return nil } @@ -127,8 +152,8 @@ func TestStopRestore(t *testing.T) { if err != nil { t.Fatalf(err.Error()) } - s.add("testBlob1", 300*timeUnit, entryTypeBlob) - s.add("testBlob2", 100*timeUnit, entryTypeBlob) + s.add(ref1, 300*timeUnit, entryTypeBlob) + s.add(ref2, 100*timeUnit, entryTypeBlob) // Start and stop before all operations complete // state will be written to fs diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go deleted file mode 100644 index 86cb6a173cb1..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/registry.go +++ /dev/null @@ -1,337 +0,0 @@ -package registry - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "os" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/formatters/logstash" - "github.com/bugsnag/bugsnag-go" - "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" - "github.com/docker/distribution/health" - "github.com/docker/distribution/registry/handlers" - "github.com/docker/distribution/registry/listener" - "github.com/docker/distribution/uuid" - "github.com/docker/distribution/version" - gorhandlers "github.com/gorilla/handlers" - "github.com/spf13/cobra" - "github.com/yvasiyarov/gorelic" -) - -// Cmd is a cobra command for running the registry. 
-var Cmd = &cobra.Command{ - Use: "registry ", - Short: "registry stores and distributes Docker images", - Long: "registry stores and distributes Docker images.", - Run: func(cmd *cobra.Command, args []string) { - if showVersion { - version.PrintVersion() - return - } - - // setup context - ctx := context.WithVersion(context.Background(), version.Version) - - config, err := resolveConfiguration(args) - if err != nil { - fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) - cmd.Usage() - os.Exit(1) - } - - if config.HTTP.Debug.Addr != "" { - go func(addr string) { - log.Infof("debug server listening %v", addr) - if err := http.ListenAndServe(addr, nil); err != nil { - log.Fatalf("error listening on debug interface: %v", err) - } - }(config.HTTP.Debug.Addr) - } - - registry, err := NewRegistry(ctx, config) - if err != nil { - log.Fatalln(err) - } - - if err = registry.ListenAndServe(); err != nil { - log.Fatalln(err) - } - }, -} - -var showVersion bool - -func init() { - Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") -} - -// A Registry represents a complete instance of the registry. -// TODO(aaronl): It might make sense for Registry to become an interface. -type Registry struct { - config *configuration.Configuration - app *handlers.App - server *http.Server -} - -// NewRegistry creates a new registry from a context and configuration struct. -func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { - var err error - ctx, err = configureLogging(ctx, config) - if err != nil { - return nil, fmt.Errorf("error configuring logger: %v", err) - } - - // inject a logger into the uuid library. warns us if there is a problem - // with uuid generation under low entropy. - uuid.Loggerf = context.GetLogger(ctx).Warnf - - app := handlers.NewApp(ctx, config) - // TODO(aaronl): The global scope of the health checks means NewRegistry - // can only be called once per process. - app.RegisterHealthChecks() - handler := configureReporting(app) - handler = alive("/", handler) - handler = health.Handler(handler) - handler = panicHandler(handler) - handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) - - server := &http.Server{ - Handler: handler, - } - - return &Registry{ - app: app, - config: config, - server: server, - }, nil -} - -// ListenAndServe runs the registry's HTTP server. 
-func (registry *Registry) ListenAndServe() error { - config := registry.config - - ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) - if err != nil { - return err - } - - if config.HTTP.TLS.Certificate != "" { - tlsConf := &tls.Config{ - ClientAuth: tls.NoClientCert, - NextProtos: []string{"http/1.1"}, - Certificates: make([]tls.Certificate, 1), - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - }, - } - - tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) - if err != nil { - return err - } - - if len(config.HTTP.TLS.ClientCAs) != 0 { - pool := x509.NewCertPool() - - for _, ca := range config.HTTP.TLS.ClientCAs { - caPem, err := ioutil.ReadFile(ca) - if err != nil { - return err - } - - if ok := pool.AppendCertsFromPEM(caPem); !ok { - return fmt.Errorf("Could not add CA to pool") - } - } - - for _, subj := range pool.Subjects() { - context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) - } - - tlsConf.ClientAuth = tls.RequireAndVerifyClientCert - tlsConf.ClientCAs = pool - } - - ln = tls.NewListener(ln, tlsConf) - context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) - } else { - context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) - } - - return registry.server.Serve(ln) -} - -func configureReporting(app *handlers.App) http.Handler { - var handler http.Handler = app - - if app.Config.Reporting.Bugsnag.APIKey != "" { - bugsnagConfig := bugsnag.Configuration{ - APIKey: app.Config.Reporting.Bugsnag.APIKey, - // TODO(brianbland): provide the registry version here - // AppVersion: "2.0", - } - if app.Config.Reporting.Bugsnag.ReleaseStage != "" { - bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage - } - if app.Config.Reporting.Bugsnag.Endpoint != "" { - bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint - } - bugsnag.Configure(bugsnagConfig) - - handler = bugsnag.Handler(handler) - } - - if app.Config.Reporting.NewRelic.LicenseKey != "" { - agent := gorelic.NewAgent() - agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey - if app.Config.Reporting.NewRelic.Name != "" { - agent.NewrelicName = app.Config.Reporting.NewRelic.Name - } - agent.CollectHTTPStat = true - agent.Verbose = app.Config.Reporting.NewRelic.Verbose - agent.Run() - - handler = agent.WrapHTTPHandler(handler) - } - - return handler -} - -// configureLogging prepares the context with a logger using the -// configuration. -func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { - if config.Log.Level == "" && config.Log.Formatter == "" { - // If no config for logging is set, fallback to deprecated "Loglevel". 
- log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx)) - return ctx, nil - } - - log.SetLevel(logLevel(config.Log.Level)) - - formatter := config.Log.Formatter - if formatter == "" { - formatter = "text" // default formatter - } - - switch formatter { - case "json": - log.SetFormatter(&log.JSONFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "text": - log.SetFormatter(&log.TextFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - case "logstash": - log.SetFormatter(&logstash.LogstashFormatter{ - TimestampFormat: time.RFC3339Nano, - }) - default: - // just let the library use default on empty string. - if config.Log.Formatter != "" { - return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) - } - } - - if config.Log.Formatter != "" { - log.Debugf("using %q logging formatter", config.Log.Formatter) - } - - if len(config.Log.Fields) > 0 { - // build up the static fields, if present. - var fields []interface{} - for k := range config.Log.Fields { - fields = append(fields, k) - } - - ctx = context.WithValues(ctx, config.Log.Fields) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) - } - - return ctx, nil -} - -func logLevel(level configuration.Loglevel) log.Level { - l, err := log.ParseLevel(string(level)) - if err != nil { - l = log.InfoLevel - log.Warnf("error parsing level %q: %v, using %q ", level, err, l) - } - - return l -} - -// panicHandler add a HTTP handler to web app. The handler recover the happening -// panic. logrus.Panic transmits panic message to pre-config log hooks, which is -// defined in config.yml. -func panicHandler(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer func() { - if err := recover(); err != nil { - log.Panic(fmt.Sprintf("%v", err)) - } - }() - handler.ServeHTTP(w, r) - }) -} - -// alive simply wraps the handler with a route that always returns an http 200 -// response when the path is matched. If the path is not matched, the request -// is passed to the provided handler. There is no guarantee of anything but -// that the server is up. Wrap with other handlers (such as health.Handler) -// for greater affect. 
-func alive(path string, handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == path { - w.Header().Set("Cache-Control", "no-cache") - w.WriteHeader(http.StatusOK) - return - } - - handler.ServeHTTP(w, r) - }) -} - -func resolveConfiguration(args []string) (*configuration.Configuration, error) { - var configurationPath string - - if len(args) > 0 { - configurationPath = args[0] - } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { - configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") - } - - if configurationPath == "" { - return nil, fmt.Errorf("configuration path unspecified") - } - - fp, err := os.Open(configurationPath) - if err != nil { - return nil, err - } - - defer fp.Close() - - config, err := configuration.Parse(fp) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) - } - - return config, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go index 38d5141ae2c3..f7ae70f12d81 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blob_test.go @@ -2,40 +2,64 @@ package storage import ( "bytes" - "crypto/rand" "crypto/sha256" "fmt" "io" "io/ioutil" - mrand "math/rand" "os" "path" - "strings" + "reflect" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" ) +// TestWriteSeek tests that the current file size can be +// obtained using Seek +func TestWriteSeek(t *testing.T) { + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + driver := inmemory.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + bs := repository.Blobs(ctx) + + blobUpload, err := bs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + contents := []byte{1, 2, 3} + blobUpload.Write(contents) + offset := blobUpload.Size() + if offset != int64(len(contents)) { + t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents)) + } + +} + // TestSimpleBlobUpload covers the blob upload process, exercising common // error paths that might be seen during an upload. 
func TestSimpleBlobUpload(t *testing.T) { - randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() + randomDataReader, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random reader: %v", err) } - dgst := digest.Digest(tarSumStr) - if err != nil { - t.Fatalf("error allocating upload store: %v", err) - } - ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -61,6 +85,15 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("unexpected error during upload cancellation: %v", err) } + // get the enclosing directory + uploadPath := path.Dir(blobUpload.(*blobWriter).path) + + // ensure state was cleaned up + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after cleanup") + } + // Do a resume, get unknown upload blobUpload, err = bs.Resume(ctx, blobUpload.ID()) if err != distribution.ErrBlobUploadUnknown { @@ -88,11 +121,7 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("layer data write incomplete") } - offset, err := blobUpload.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("unexpected error seeking layer upload: %v", err) - } - + offset := blobUpload.Size() if offset != nn { t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) } @@ -110,6 +139,13 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("unexpected error finishing layer upload: %v", err) } + // ensure state was cleaned up + uploadPath = path.Dir(blobUpload.(*blobWriter).path) + _, err = driver.List(ctx, uploadPath) + if err == nil { + t.Fatal("files in upload path after commit") + } + // After finishing an upload, it should no longer exist. 
if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { t.Fatalf("expected layer upload to be unknown, got %v", err) @@ -121,7 +157,7 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) } - if statDesc != desc { + if !reflect.DeepEqual(statDesc, desc) { t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) } @@ -145,13 +181,10 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) } - checkBlobParentPath(t, ctx, driver, "", desc.Digest, true) - checkBlobParentPath(t, ctx, driver, imageName, desc.Digest, true) - // Delete a blob err = bs.Delete(ctx, desc.Digest) if err != nil { - t.Fatalf("Unexpected error deleting blob: %v", err) + t.Fatalf("Unexpected error deleting blob") } d, err := bs.Stat(ctx, desc.Digest) @@ -159,9 +192,6 @@ func TestSimpleBlobUpload(t *testing.T) { t.Fatalf("unexpected non-error stating deleted blob: %v", d) } - checkBlobParentPath(t, ctx, driver, "", desc.Digest, true) - checkBlobParentPath(t, ctx, driver, imageName, desc.Digest, true) - switch err { case distribution.ErrBlobUnknown: break @@ -186,10 +216,7 @@ func TestSimpleBlobUpload(t *testing.T) { if err != nil { t.Fatalf("Error reading all of blob %s", err.Error()) } - expectedDigest, err := digest.FromBytes(randomBlob) - if err != nil { - t.Fatalf("Error getting digest from bytes: %s", err) - } + expectedDigest := digest.FromBytes(randomBlob) simpleUpload(t, bs, randomBlob, expectedDigest) d, err = bs.Stat(ctx, expectedDigest) @@ -226,7 +253,7 @@ func TestSimpleBlobUpload(t *testing.T) { // other tests. func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -238,13 +265,11 @@ func TestSimpleBlobRead(t *testing.T) { } bs := repository.Blobs(ctx) - randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. + randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. if err != nil { t.Fatalf("error creating random data: %v", err) } - dgst := digest.Digest(tarSumStr) - // Test for existence. desc, err := bs.Stat(ctx, dgst) if err != distribution.ErrBlobUnknown { @@ -330,402 +355,180 @@ func TestSimpleBlobRead(t *testing.T) { } } -// TestLayerUploadZeroLength uploads zero-length -func TestLayerUploadZeroLength(t *testing.T) { +// TestBlobMount covers the blob mount process, exercising common +// error paths that might be seen during a mount. 
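The new TestBlobMount below exercises cross-repository blob mounting: instead of re-uploading a layer that already exists under another repository, the client asks Create to link it, and a successful mount surfaces as a distribution.ErrBlobMounted error carrying the linked blob's descriptor. A minimal sketch of that flow from outside the package, assuming the WithMountFrom option used in the test is the exported storage.WithMountFrom and that the two repositories have already been constructed:

```go
package registryutil

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage"
)

// mountBlob asks dest to link dgst from source instead of re-uploading it.
// On a successful mount, Create returns no writer and an ErrBlobMounted
// whose Descriptor describes the blob now visible in dest.
func mountBlob(ctx context.Context, source, dest distribution.Repository, dgst digest.Digest) (distribution.Descriptor, error) {
	canonicalRef, err := reference.WithDigest(source.Named(), dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	bw, err := dest.Blobs(ctx).Create(ctx, storage.WithMountFrom(canonicalRef))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		// Mounted: no data was copied, only a link was written in dest.
		return ebm.Descriptor, nil
	}
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// The registry declined to mount and handed back an ordinary writer;
	// clean it up and let the caller fall back to a regular upload.
	_ = bw.Cancel(ctx)
	return distribution.Descriptor{}, fmt.Errorf("blob %s was not mounted", dgst)
}
```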
+func TestBlobMount(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") + sourceImageName, _ := reference.ParseNamed("foo/source") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } + repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - bs := repository.Blobs(ctx) + sourceRepository, err := registry.Repository(ctx, sourceImageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } - simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) -} + sbs := sourceRepository.Blobs(ctx) + + blobUpload, err := sbs.Create(ctx) -// TestRemoveParentsOnDelete verifies that blob store deletes a directory -// together with blob's data or link when RemoveParentsOnDelete option is -// applied. -func TestRemoveBlobParentsOnDelete(t *testing.T) { - ctx := context.Background() - imageName := "foo/bar" - driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, RemoveParentsOnDelete) if err != nil { - t.Fatalf("error creating registry: %v", err) + t.Fatalf("unexpected error starting layer upload: %s", err) } - repository, err := registry.Repository(ctx, imageName) + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) + t.Fatalf("error getting seeker size of random data: %v", err) } - bs := repository.Blobs(ctx) - checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, false) - checkBlobParentPath(t, ctx, driver, imageName, digest.DigestSha256EmptyTar, false) + nn, err := io.Copy(blobUpload, randomDataReader) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } - simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // Test for existence. + statDesc, err := sbs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) + } - checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, true) - checkBlobParentPath(t, ctx, driver, imageName, digest.DigestSha256EmptyTar, true) + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + bs := repository.Blobs(ctx) + // Test destination for existence. 
+ statDesc, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) + } - // Delete a layer link - err = bs.Delete(ctx, digest.DigestSha256EmptyTar) + canonicalRef, err := reference.WithDigest(sourceRepository.Named(), desc.Digest) if err != nil { - t.Fatalf("Unexpected error deleting blob: %v", err) + t.Fatal(err) + } + + bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatal("unexpected blobwriter returned from Create call, should mount instead") } - checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, true) - checkBlobParentPath(t, ctx, driver, imageName, digest.DigestSha256EmptyTar, false) + ebm, ok := err.(distribution.ErrBlobMounted) + if !ok { + t.Fatalf("unexpected error mounting layer: %v", err) + } + + if !reflect.DeepEqual(ebm.Descriptor, desc) { + t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) + } - bd, err := RegistryBlobDeleter(registry) + // Test for existence. + statDesc, err = bs.Stat(ctx, desc.Digest) if err != nil { - t.Fatalf("failed to obtain blob deleter: %v", err) + t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) } - bd.Delete(ctx, digest.DigestSha256EmptyTar) - checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, false) - checkBlobParentPath(t, ctx, driver, imageName, digest.DigestSha256EmptyTar, false) -} + if !reflect.DeepEqual(statDesc, desc) { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } -// TestBlobEnumeration checks whether enumeration of repository and registry's -// blobs returns proper results. -func TestBlobEnumeration(t *testing.T) { - ctx := context.Background() - imageNames := []string{"foo/bar", "baz/gas"} - driver := inmemory.New() - reg, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + rc, err := bs.Open(ctx, desc.Digest) if err != nil { - t.Fatalf("error creating registry: %v", err) + t.Fatalf("unexpected error opening blob for read: %v", err) } - // holds a repository objects corresponding to imageNames - repositories := make([]distribution.Repository, len(imageNames)) - // holds blob store of each repository - blobStores := make([]distribution.BlobStore, len(imageNames)) - for i, name := range imageNames { - repositories[i], err = reg.Repository(ctx, name) - if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) - } - blobStores[i] = repositories[i].Blobs(ctx) - } - be, err := RegistryBlobEnumerator(reg) - if err != nil { - t.Fatalf("unexpected error getting blob enumerator: %v", err) - } - - // doEnumeration calls Enumerate method on all repositories and registry's blob store. - // Additinal arguments represent expected digests for each repository defined. 
- doEnumeration := func(expectRegistryBlobs []digest.Digest, expectDigests ...[]digest.Digest) { - expBlobSets := make([]map[digest.Digest]struct{}, len(imageNames)) - // each number is a counter of tarsum digests for corresponding repository - tarsumDgstCounts := make([]int, len(imageNames)) - totalBlobSet := make(map[digest.Digest]struct{}) - tarsumTotalDgstCount := 0 - for i, dgsts := range expectDigests { - expBlobSets[i] = make(map[digest.Digest]struct{}) - for _, dgst := range dgsts { - expBlobSets[i][dgst] = struct{}{} - if strings.HasPrefix(dgst.String(), "tarsum") { - tarsumDgstCounts[i]++ - } - } - } - for _, d := range expectRegistryBlobs { - if strings.HasPrefix(d.String(), "tarsum") { - tarsumTotalDgstCount++ - } else { - totalBlobSet[d] = struct{}{} - } - } - - unexpected := []digest.Digest{} - blobTarsumDigests := make(map[digest.Digest]struct{}) - - dgsts := make([]digest.Digest, 0, len(totalBlobSet)+tarsumTotalDgstCount+1) - - for i, bs := range blobStores { - err := bs.Enumerate(ctx, func(dgst digest.Digest) error { - dgsts = append(dgsts, dgst) - return nil - }) - if err != io.EOF { - t.Fatalf("expected io.EOF when enumerating blobs of repository %s, not: %v", imageNames[i], err) - } - // linked blob store stores 2 links per tarsum blob - if len(dgsts) != len(expBlobSets[i])+tarsumDgstCounts[i] { - t.Errorf("got unexpected number of blobs in repository %s (%d != %d)", imageNames[i], len(dgsts), len(expBlobSets[i])+tarsumDgstCounts[i]) - } - for _, d := range dgsts { - if _, exists := expBlobSets[i][d]; !exists { - unexpected = append(unexpected, d) - blobTarsumDigests[d] = struct{}{} - } - delete(expBlobSets[i], d) - } - if len(unexpected) != tarsumDgstCounts[i] { - for _, d := range dgsts { - t.Errorf("received unexpected blob digest %s in repository %s", d, imageNames[i]) - } - } - for d := range expBlobSets[i] { - t.Errorf("expected digest %s not received for repository %s", d, imageNames[i]) - } - unexpected = unexpected[:0] - dgsts = dgsts[0:0] - } - - err := be.Enumerate(ctx, func(dgst digest.Digest) error { - dgsts = append(dgsts, dgst) - return nil - }) - if err != io.EOF { - t.Fatalf("expected io.EOF when enumerating registry blobs, not: %v", err) - } - if len(dgsts) != len(totalBlobSet)+tarsumTotalDgstCount { - t.Errorf("got unexpected number of blobs in registry (%d != %d)", len(dgsts), len(totalBlobSet)+tarsumTotalDgstCount) - } - for _, d := range dgsts { - if _, exists := totalBlobSet[d]; !exists { - unexpected = append(unexpected, d) - } - delete(totalBlobSet, d) - } - for _, d := range unexpected { - if _, exists := blobTarsumDigests[d]; !exists || len(unexpected) != tarsumTotalDgstCount { - t.Errorf("received unexpected blob digest %s", d) - } - } - for d := range totalBlobSet { - t.Errorf("expected digest %s not received", d) - } - } - - doEnumeration( - []digest.Digest{}, - []digest.Digest{}, - []digest.Digest{}, - ) - - t.Logf("uploading an empty tarball to repository %s", imageNames[0]) - simpleUpload(t, blobStores[0], []byte{}, digest.DigestSha256EmptyTar) - - doEnumeration( - []digest.Digest{digest.DigestSha256EmptyTar}, - []digest.Digest{digest.DigestSha256EmptyTar}, - []digest.Digest{}, - ) - - t.Logf("uploading a random tarball to repository %s", imageNames[1]) - tarballDgst := uploadRandomTarball(t, ctx, blobStores[1]) - - doEnumeration( - []digest.Digest{digest.DigestSha256EmptyTar, tarballDgst}, - []digest.Digest{digest.DigestSha256EmptyTar}, - []digest.Digest{tarballDgst}, - ) - - t.Logf("uploading a random layer to %s repository", 
imageNames[0]) - layerDgst := uploadRandomLayer(t, ctx, blobStores[0]) - - doEnumeration( - []digest.Digest{digest.DigestSha256EmptyTar, layerDgst, tarballDgst}, - []digest.Digest{digest.DigestSha256EmptyTar, layerDgst}, - []digest.Digest{tarballDgst}, - ) - - // delete is performed without parent directory being deleted - t.Logf("deleting empty layer data from registry") - bd, err := RegistryBlobDeleter(reg) - if err != nil { - t.Fatalf("failed to obtain blob deleter: %v", err) - } - err = bd.Delete(ctx, digest.DigestSha256EmptyTar) - if err != nil { - t.Fatalf("unexpected error while deleting registry blob: %v", err) - } - checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, true) - checkBlobParentPath(t, ctx, driver, imageNames[0], digest.DigestSha256EmptyTar, true) - checkBlobParentPath(t, ctx, driver, imageNames[1], digest.DigestSha256EmptyTar, false) - - // check that deletion had no effect on digests enumerated - doEnumeration( - []digest.Digest{digest.DigestSha256EmptyTar, layerDgst, tarballDgst}, - []digest.Digest{digest.DigestSha256EmptyTar, layerDgst}, - []digest.Digest{tarballDgst}, - ) - - // set RemoveParentsOnDelete and delete the layer again - if r, ok := reg.(*registry); ok { - RemoveParentsOnDelete(r) - } else { - t.Fatalf("failed to cast registry") - } - - repo, err := reg.Repository(ctx, imageNames[0]) + defer rc.Close() + + h := sha256.New() + nn, err = io.Copy(h, rc) if err != nil { - t.Fatalf("unexpected error getting repo: %v", err) + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") } - bs := repo.Blobs(ctx) - bd, err = RegistryBlobDeleter(reg) + + if digest.NewDigest("sha256", h) != dgst { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst) + } + + // Delete the blob from the source repo + err = sbs.Delete(ctx, desc.Digest) if err != nil { - t.Fatalf("failed to obtain blob deleter: %v", err) + t.Fatalf("Unexpected error deleting blob") } - t.Logf("deleting empty layer link directory from %s repository", imageNames[0]) - err = bs.Delete(ctx, digest.DigestSha256EmptyTar) + d, err := bs.Stat(ctx, desc.Digest) if err != nil { - t.Fatalf("unexpected error while deleting empty layer link: %v", err) + t.Fatalf("unexpected error stating blob deleted from source repository: %v", err) } - checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, true) - checkBlobParentPath(t, ctx, driver, imageNames[0], digest.DigestSha256EmptyTar, false) - checkBlobParentPath(t, ctx, driver, imageNames[1], digest.DigestSha256EmptyTar, false) - // verify that blob data is still in registry's store - doEnumeration( - []digest.Digest{digest.DigestSha256EmptyTar, layerDgst, tarballDgst}, - []digest.Digest{layerDgst}, - []digest.Digest{tarballDgst}, - ) + d, err = sbs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } - t.Logf("deleting empty layer directory from registry") - err = bd.Delete(ctx, digest.DigestSha256EmptyTar) + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } + + // Delete the blob from the dest repo + err = bs.Delete(ctx, desc.Digest) if err != nil { - t.Fatalf("unexpected error while deleting registry blob: %v", err) + t.Fatalf("Unexpected error deleting blob") } - doEnumeration( - []digest.Digest{layerDgst, tarballDgst}, - []digest.Digest{layerDgst}, - []digest.Digest{tarballDgst}, - ) + d, err 
= bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } - checkBlobParentPath(t, ctx, driver, "", digest.DigestSha256EmptyTar, false) + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } } -// TestBlobEnumeration checks whether enumeration of repository and registry's -// blobs returns proper results when callback indicates *stop processing*. -func TestBlobStopEnumeration(t *testing.T) { - const numDigests = 10 +// TestLayerUploadZeroLength uploads zero-length +func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() - reg, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } - - repo, err := reg.Repository(ctx, imageName) + repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - bs := repo.Blobs(ctx) - if err != nil { - t.Fatalf("unexpected error getting blob enumerator: %v", err) - } - be, err := RegistryBlobEnumerator(reg) - if err != nil { - t.Fatal(err) - } + bs := repository.Blobs(ctx) - digests := make([]digest.Digest, numDigests) - for i := 0; i < numDigests; i++ { - var dgst digest.Digest - if i%2 == 0 { - dgst = uploadRandomLayer(t, ctx, bs) - t.Logf("uploaded a new layer with digest %s", dgst.String()) - } else { - dgst = uploadRandomTarball(t, ctx, bs) - t.Logf("uploaded a new tarsum layer with digest %s", dgst.String()) - } - digests[i] = dgst - } - tarsumDigests := numDigests/2 + numDigests%1 - layerLinkDigests, err := enumerateAllBlobs(bs, ctx) - // two links are written for one tarsum digest in the blob store - if len(layerLinkDigests) != numDigests+tarsumDigests { - t.Fatalf("unexpected number of digests (%d != %d)", len(layerLinkDigests), numDigests+tarsumDigests) - } - // registry doesn't contain tarsum digests - blobDigests := make([]digest.Digest, 0, len(digests)) - for _, d := range layerLinkDigests { - if !strings.HasPrefix(d.String(), "tarsum") { - blobDigests = append(blobDigests, d) - } - } - - testEnum := func(be distribution.BlobEnumerator, stopAfter int, withEOF bool, allDigests []digest.Digest) { - ret := "error" - if withEOF { - ret = "EOF" - } - testCtx := fmt.Sprintf("%T: stopAfter=%d with %s", be, stopAfter, ret) - - dgsts := []digest.Digest{} - err := be.Enumerate(ctx, func(dgst digest.Digest) error { - if err := dgst.Validate(); err != nil { - t.Errorf("%s: ingest callback called with invalid digest %q: %v", testCtx, dgst.String(), err) - } - if len(dgsts) >= stopAfter { - t.Errorf("%s: ingest callback called again after returning a request for stop (n=%d)", testCtx, len(dgsts)) - } - dgsts = append(dgsts, dgst) - if len(dgsts) >= stopAfter { - if withEOF { - return io.EOF - } - return fmt.Errorf("don't call us again") - } - return nil - }) - - if err == io.EOF && len(dgsts) < len(allDigests) { - t.Errorf("%s: got unexpected io.EOF", testCtx) - } else if len(dgsts) > len(allDigests) && err != io.EOF { - t.Errorf("%s: expected io.EOF, got: %v", testCtx, err) - } - - if len(dgsts) != stopAfter { - t.Errorf("%s: ingest function 
called %d times instead of %d", testCtx, len(dgsts), stopAfter) - } - - dgstSet := map[digest.Digest]struct{}{} - for _, d := range allDigests { - if _, exists := dgstSet[d]; exists { - t.Errorf("%s: received duplicate digest %q", testCtx, d.String()) - } - dgstSet[d] = struct{}{} - } - - for _, dgst := range allDigests { - delete(dgstSet, dgst) - } - - for dgst := range dgstSet { - t.Errorf("%s: got unexpected digest %q", testCtx, dgst.String()) - } - } - - // enumerate linked blob store - testEnum(bs, 10, true, layerLinkDigests) - testEnum(bs, 4, true, layerLinkDigests) - testEnum(bs, 1, true, layerLinkDigests) - - testEnum(bs, 10, false, layerLinkDigests) - testEnum(bs, 4, false, layerLinkDigests) - testEnum(bs, 1, false, layerLinkDigests) - - testEnum(be, 10, true, blobDigests) - testEnum(be, 4, true, blobDigests) - testEnum(be, 1, true, blobDigests) - - testEnum(be, 10, false, blobDigests) - testEnum(be, 4, false, blobDigests) - testEnum(be, 1, false, blobDigests) + simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) } func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { @@ -751,7 +554,7 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec if dgst != expectedDigest { // sanity check on zero digest - t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar) + t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest) } desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) @@ -807,106 +610,3 @@ func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distributio return wr.Commit(ctx, desc) } - -func createRandomData() (io.ReadSeeker, int64, error) { - fileSize := mrand.Int63n(1<<20) + 1<<20 - - randomData := make([]byte, fileSize) - // Fill up the buffer with some random data. 
- n, err := rand.Read(randomData) - if err != nil { - return nil, 0, fmt.Errorf("failed to fill buffer with random data: %v", err) - } - if n != len(randomData) { - return nil, 0, fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) - } - - return bytes.NewReader(randomData), fileSize, nil -} - -func uploadRandomLayer(t *testing.T, ctx context.Context, bi distribution.BlobIngester) digest.Digest { - dr, size, err := createRandomData() - if err != nil { - t.Fatalf("failed to create random file: %v", err) - } - - h := sha256.New() - rd := io.TeeReader(dr, h) - blobUpload, err := bi.Create(ctx) - if err != nil { - t.Fatalf("unexpected error starting layer upload: %s", err) - } - nn, err := io.Copy(blobUpload, rd) - if err != nil { - t.Fatalf("unexpected error uploading layer data: %v", err) - } - if nn != size { - t.Fatalf("layer data write incomplete") - } - dgst := digest.NewDigest("sha256", h) - _, err = blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) - if err != nil { - t.Fatalf("unexpected error finishing layer upload: %v", err) - } - return dgst -} - -func uploadRandomTarball(t *testing.T, ctx context.Context, bi distribution.BlobIngester) digest.Digest { - randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating random reader: %v", err) - } - dgst := digest.Digest(tarSumStr) - if err != nil { - t.Fatalf("error allocating upload store: %v", err) - } - - randomLayerSize, err := seekerSize(randomDataReader) - if err != nil { - t.Fatalf("error getting seeker size for random layer: %v", err) - } - - _, err = addBlob(ctx, bi, distribution.Descriptor{ - Digest: dgst, - MediaType: "application/octet-stream", - Size: randomLayerSize, - }, randomDataReader) - if err != nil { - t.Fatalf("failed to add blob: %v", err) - } - return dgst -} - -// checkBlobParentPath asserts that a directory containing blob's link or data -// does (not) exist. If repoName is given, link path in _layers directory of -// that repository will be checked. Registry's blob store will be checked -// otherwise. 
-func checkBlobParentPath(t *testing.T, ctx context.Context, driver *inmemory.Driver, repoName string, dgst digest.Digest, expectExistent bool) { - var ( - blobPath string - err error - ) - - if repoName != "" { - blobPath, err = pathFor(layerLinkPathSpec{name: repoName, digest: dgst}) - if err != nil { - t.Fatalf("failed to get layer link path for repo=%s, digest=%s: %v", repoName, dgst.String(), err) - } - blobPath = path.Dir(blobPath) - } else { - blobPath, err = pathFor(blobPathSpec{digest: dgst}) - if err != nil { - t.Fatalf("failed to get blob path for digest %s: %v", dgst.String(), err) - } - } - - parentExists, err := exists(ctx, driver, blobPath) - if err != nil { - t.Fatalf("failed to check whether path %s exists: %v", blobPath, err) - } - if expectExistent && !parentExists { - t.Errorf("expected blob path %s to exist", blobPath) - } else if !expectExistent && parentExists { - t.Errorf("expected blob path %s not to exist", blobPath) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go index 45f81f53d1ce..2655e0113929 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobserver.go @@ -34,45 +34,45 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h return err } - redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) - - switch err.(type) { - case nil: - if bs.redirect { + if bs.redirect { + redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) + switch err.(type) { + case nil: // Redirect to storage URL. http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) return err - } - case driver.ErrUnsupportedMethod: - // Fallback to serving the content directly. - br, err := newFileReader(ctx, bs.driver, path, desc.Size) - if err != nil { + case driver.ErrUnsupportedMethod: + // Fallback to serving the content directly. + default: + // Some unexpected error. return err } - defer br.Close() + } - w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) + br, err := newFileReader(ctx, bs.driver, path, desc.Size) + if err != nil { + return err + } + defer br.Close() - if w.Header().Get("Docker-Content-Digest") == "" { - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - } + w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) - if w.Header().Get("Content-Type") == "" { - // Set the content type if not already set. - w.Header().Set("Content-Type", desc.MediaType) - } + if w.Header().Get("Docker-Content-Digest") == "" { + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + } - if w.Header().Get("Content-Length") == "" { - // Set the content length if not already set. - w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) - } + if w.Header().Get("Content-Type") == "" { + // Set the content type if not already set. 
+ w.Header().Set("Content-Type", desc.MediaType) + } - http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) - return nil + if w.Header().Get("Content-Length") == "" { + // Set the content length if not already set. + w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) } - // Some unexpected error. - return err + http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) + return nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go index ede644a545c9..9034cb6895dd 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobstore.go @@ -1,7 +1,6 @@ package storage import ( - "io" "path" "github.com/docker/distribution" @@ -15,17 +14,11 @@ import ( // intentionally a leaky abstraction, providing utility methods that support // creating and traversing backend links. type blobStore struct { - driver driver.StorageDriver - statter distribution.BlobDescriptorService - deleteEnabled bool - // Causes directory containing blob's data to be removed recursively upon - // Delete. - removeParentsOnDelete bool + driver driver.StorageDriver + statter distribution.BlobStatter } -var _ distribution.BlobService = &blobStore{} -var _ distribution.BlobEnumerator = &blobStore{} -var _ distribution.BlobDeleter = &blobStore{} +var _ distribution.BlobProvider = &blobStore{} // Get implements the BlobReadService.Get call. func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { @@ -65,12 +58,7 @@ func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution // content is already present, only the digest will be returned. This should // only be used for small objects, such as manifests. 
This implemented as a convenience for other Put implementations func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst, err := digest.FromBytes(p) - if err != nil { - context.GetLogger(ctx).Errorf("blobStore: error digesting content: %v, %s", err, string(p)) - return distribution.Descriptor{}, err - } - + dgst := digest.FromBytes(p) desc, err := bs.statter.Stat(ctx, dgst) if err == nil { // content already present @@ -99,62 +87,34 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr }, bs.driver.PutContent(ctx, bp, p) } -func (bs *blobStore) Enumerate(ctx context.Context, ingest func(digest.Digest) error) error { - context.GetLogger(ctx).Debug("(*blobStore).Enumerate") - rootPath := path.Join(storagePathRoot, storagePathVersion, "blobs") +func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error { - walkFn, err := makeBlobStoreWalkFunc(rootPath, true, ingest) + specPath, err := pathFor(blobsPathSpec{}) if err != nil { return err } - err = Walk(ctx, bs.driver, rootPath, walkFn) - if err != nil { - switch err.(type) { - case driver.PathNotFoundError: - return io.EOF - } - if err == ErrFinishedWalk { + err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error { + // skip directories + if fileInfo.IsDir() { return nil } - return err - } - - return io.EOF -} - -func (bs *blobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { - return nil, distribution.ErrUnsupported -} -func (bs *blobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - return nil, distribution.ErrUnsupported -} - -func (bs *blobStore) Delete(ctx context.Context, dgst digest.Digest) error { - var ( - blobPath string - err error - ) - if !bs.deleteEnabled { - return distribution.ErrUnsupported - } - - if bs.removeParentsOnDelete { - blobPath, err = pathFor(blobPathSpec{digest: dgst}) - } else { - blobPath, err = pathFor(blobDataPathSpec{digest: dgst}) - } - if err != nil { - return err - } + currentPath := fileInfo.Path() + // we only want to parse paths that end with /data + _, fileName := path.Split(currentPath) + if fileName != "data" { + return nil + } - context.GetLogger(ctx).Infof("Deleting blob path: %s", blobPath) - return bs.driver.Delete(ctx, blobPath) -} + digest, err := digestFromPath(currentPath) + if err != nil { + return err + } -func (bs *blobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) + return ingester(digest) + }) + return err } // path returns the canonical path for the blob identified by digest. The blob diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go index 3453a57ad344..3387bafb112f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter.go @@ -18,9 +18,10 @@ var ( errResumableDigestNotAvailable = errors.New("resumable digest not available") ) -// layerWriter is used to control the various aspects of resumable -// layer upload. It implements the LayerUpload interface. +// blobWriter is used to control the various aspects of resumable +// blob upload. 
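The blobWriter declared next is rebuilt around a driver-level FileWriter: offsets come from Size rather than Seek, Commit finalizes the file and flips a committed flag (so a later Close fails), and Cancel tears down the upload's on-disk state. From the caller's side the contract looks roughly like the sketch below, written against the distribution.BlobWriter interface; treating Cancel as safe best-effort cleanup after a failure is an assumption here, not a documented guarantee:

```go
package registryutil

import (
	"fmt"
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// uploadOrCancel streams r into a new upload and commits it against dgst.
// On failure it cancels the writer so the upload's state is removed.
func uploadOrCancel(ctx context.Context, blobs distribution.BlobStore, r io.Reader, dgst digest.Digest) (distribution.Descriptor, error) {
	w, err := blobs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	n, err := io.Copy(w, r)
	if err != nil {
		_ = w.Cancel(ctx) // best effort; Cancel also closes the underlying file writer
		return distribution.Descriptor{}, err
	}

	// Size now reports how much has been written, replacing the old
	// Seek-based offset check.
	if w.Size() != n {
		_ = w.Cancel(ctx)
		return distribution.Descriptor{}, fmt.Errorf("short write: copied %d bytes, writer reports %d", n, w.Size())
	}

	desc, err := w.Commit(ctx, distribution.Descriptor{Digest: dgst})
	if err != nil {
		_ = w.Cancel(ctx) // best effort
		return distribution.Descriptor{}, err
	}
	// Close after a successful Commit now returns an error, so the writer
	// is not closed again here.
	return desc, nil
}
```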
type blobWriter struct { + ctx context.Context blobStore *linkedBlobStore id string @@ -28,11 +29,12 @@ type blobWriter struct { digester digest.Digester written int64 // track the contiguous write - // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy - // LayerUpload Interface - bufferedFileWriter + fileWriter storagedriver.FileWriter + driver storagedriver.StorageDriver + path string resumableDigestEnabled bool + committed bool } var _ distribution.BlobWriter = &blobWriter{} @@ -51,10 +53,12 @@ func (bw *blobWriter) StartedAt() time.Time { func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { context.GetLogger(ctx).Debug("(*blobWriter).Commit") - if err := bw.bufferedFileWriter.Close(); err != nil { + if err := bw.fileWriter.Commit(); err != nil { return distribution.Descriptor{}, err } + bw.Close() + canonical, err := bw.validateBlob(ctx, desc) if err != nil { return distribution.Descriptor{}, err @@ -77,6 +81,7 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) return distribution.Descriptor{}, err } + bw.committed = true return canonical, nil } @@ -84,23 +89,34 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) // the writer and canceling the operation. func (bw *blobWriter) Cancel(ctx context.Context) error { context.GetLogger(ctx).Debug("(*blobWriter).Rollback") + if err := bw.fileWriter.Cancel(); err != nil { + return err + } + + if err := bw.Close(); err != nil { + context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) + } + if err := bw.removeResources(ctx); err != nil { return err } - bw.Close() return nil } +func (bw *blobWriter) Size() int64 { + return bw.fileWriter.Size() +} + func (bw *blobWriter) Write(p []byte) (int, error) { // Ensure that the current write offset matches how many bytes have been // written to the digester. If not, we need to update the digest state to // match the current write position. - if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { return 0, err } - n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p) + n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) bw.written += int64(n) return n, err @@ -110,26 +126,26 @@ func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { // Ensure that the current write offset matches how many bytes have been // written to the digester. If not, we need to update the digest state to // match the current write position. 
- if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { + if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable { return 0, err } - nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) + nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) bw.written += nn return nn, err } func (bw *blobWriter) Close() error { - if bw.err != nil { - return bw.err + if bw.committed { + return errors.New("blobwriter close after commit") } if err := bw.storeHashState(bw.blobStore.ctx); err != nil { return err } - return bw.bufferedFileWriter.Close() + return bw.fileWriter.Close() } // validateBlob checks the data against the digest, returning an error if it @@ -148,8 +164,10 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri } } + var size int64 + // Stat the on disk file - if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil { + if fi, err := bw.driver.Stat(ctx, bw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // NOTE(stevvooe): We really don't care if the file is @@ -165,23 +183,23 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) } - bw.size = fi.Size() + size = fi.Size() } if desc.Size > 0 { - if desc.Size != bw.size { + if desc.Size != size { return distribution.Descriptor{}, distribution.ErrBlobInvalidLength } } else { // if provided 0 or negative length, we can assume caller doesn't know or // care about length. - desc.Size = bw.size + desc.Size = size } // TODO(stevvooe): This section is very meandering. Need to be broken down // to be a lot more clear. - if err := bw.resumeDigestAt(ctx, bw.size); err == nil { + if err := bw.resumeDigest(ctx); err == nil { canonical = bw.digester.Digest() if canonical.Algorithm() == desc.Digest.Algorithm() { @@ -206,7 +224,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri // the same, we don't need to read the data from the backend. This is // because we've written the entire file in the lifecycle of the // current instance. - if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() { + if bw.written == size && digest.Canonical == desc.Digest.Algorithm() { canonical = bw.digester.Digest() verified = desc.Digest == canonical } @@ -223,7 +241,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri } // Read the file from the backend driver and validate it. - fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size) + fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size) if err != nil { return distribution.Descriptor{}, err } @@ -302,7 +320,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // get a hash, then the underlying file is deleted, we risk moving // a zero-length blob into a nonzero-length blob location. To // prevent this horrid thing, we employ the hack of only allowing - // to this happen for the zero tarsum. + // to this happen for the digest of an empty tar. if desc.Digest == digest.DigestSha256EmptyTar { return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) } @@ -326,7 +344,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // resources are already not present, no error will be returned. 
func (bw *blobWriter) removeResources(ctx context.Context) error { dataPath, err := pathFor(uploadDataPathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Named().Name(), id: bw.id, }) @@ -357,7 +375,7 @@ func (bw *blobWriter) Reader() (io.ReadCloser, error) { // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 try := 1 for try <= 5 { - _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) + _, err := bw.driver.Stat(bw.ctx, bw.path) if err == nil { break } @@ -371,7 +389,7 @@ func (bw *blobWriter) Reader() (io.ReadCloser, error) { } } - readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) + readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0) if err != nil { return nil, err } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go index d33f544da2a9..ff5482c3fd21 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/blobwriter_resumable.go @@ -4,8 +4,6 @@ package storage import ( "fmt" - "io" - "os" "path" "strconv" @@ -19,24 +17,18 @@ import ( _ "github.com/stevvooe/resumable/sha512" ) -// resumeDigestAt attempts to restore the state of the internal hash function -// by loading the most recent saved hash state less than or equal to the given -// offset. Any unhashed bytes remaining less than the given offset are hashed -// from the content uploaded so far. -func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { +// resumeDigest attempts to restore the state of the internal hash function +// by loading the most recent saved hash state equal to the current size of the blob. +func (bw *blobWriter) resumeDigest(ctx context.Context) error { if !bw.resumableDigestEnabled { return errResumableDigestNotAvailable } - if offset < 0 { - return fmt.Errorf("cannot resume hash at negative offset: %d", offset) - } - h, ok := bw.digester.Hash().(resumable.Hash) if !ok { return errResumableDigestNotAvailable } - + offset := bw.fileWriter.Size() if offset == int64(h.Len()) { // State of digester is already at the requested offset. return nil @@ -49,24 +41,12 @@ func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) } - // Find the highest stored hashState with offset less than or equal to + // Find the highest stored hashState with offset equal to // the requested offset. for _, hashState := range hashStates { if hashState.offset == offset { hashStateMatch = hashState break // Found an exact offset match. - } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { - // This offset is closer to the requested offset. - hashStateMatch = hashState - } else if hashState.offset > offset { - // Remove any stored hash state with offsets higher than this one - // as writes to this resumed hasher will make those invalid. This - // is probably okay to skip for now since we don't expect anyone to - // use the API in this way. For that reason, we don't treat an - // an error here as a fatal error, but only log it. 
- if err := bw.driver.Delete(ctx, hashState.path); err != nil { - logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) - } } } @@ -86,20 +66,7 @@ func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { // Mind the gap. if gapLen := offset - int64(h.Len()); gapLen > 0 { - // Need to read content from the upload to catch up to the desired offset. - fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) - if err != nil { - return err - } - defer fr.Close() - - if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { - return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) - } - - if _, err := io.CopyN(h, fr, gapLen); err != nil { - return err - } + return errResumableDigestNotAvailable } return nil @@ -113,7 +80,7 @@ type hashStateEntry struct { // getStoredHashStates returns a slice of hashStateEntries for this upload. func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Named().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), list: true, @@ -159,7 +126,7 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { } uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Named().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), offset: int64(h.Len()), diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go deleted file mode 100644 index 423909538c9f..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go +++ /dev/null @@ -1,179 +0,0 @@ -package cachecheck - -import ( - "testing" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/storage/cache" -) - -// CheckBlobDescriptorCache takes a cache implementation through a common set -// of operations. If adding new tests, please add them here so new -// implementations get the benefit. This should be used for unit tests. 
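Although this shared cachecheck suite is dropped from the vendored tree, the API it exercised is the same BlobDescriptorCacheProvider the storage tests above construct via memory.NewInMemoryBlobDescriptorCacheProvider: descriptors set through a repository-scoped view also become visible through the provider's global Stat. A small hedged usage sketch of that provider, with an illustrative repository name and payload:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/storage/cache/memory"
)

func main() {
	ctx := context.Background()
	provider := memory.NewInMemoryBlobDescriptorCacheProvider()

	// The repository-scoped view is what a repository's blob statter
	// consults first; writes to it also populate the global mapping.
	repoCache, err := provider.RepositoryScoped("foo/bar")
	if err != nil {
		log.Fatal(err)
	}

	payload := []byte("layer bytes")
	desc := distribution.Descriptor{
		Digest:    digest.FromBytes(payload),
		Size:      int64(len(payload)),
		MediaType: "application/octet-stream",
	}

	if err := repoCache.SetDescriptor(ctx, desc.Digest, desc); err != nil {
		log.Fatal(err)
	}

	// Both the scoped cache and the global provider can now answer Stat
	// for this digest.
	if got, err := repoCache.Stat(ctx, desc.Digest); err == nil {
		fmt.Println("repo-scoped:", got.Digest, got.Size)
	}
	if got, err := provider.Stat(ctx, desc.Digest); err == nil {
		fmt.Println("global:", got.Digest, got.MediaType)
	}
}
```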
-func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { - ctx := context.Background() - - checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) - checkBlobDescriptorCacheSetAndRead(t, ctx, provider) -} - -func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { - t.Fatalf("expected unknown blob error with empty store: %v", err) - } - - cache, err := provider.RepositoryScoped("") - if err == nil { - t.Fatalf("expected an error when asking for invalid repo") - } - - cache, err = provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting repository: %v", err) - } - - if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ - Digest: "sha384:abc", - Size: 10, - MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { - t.Fatalf("expected error with invalid digest: %v", err) - } - - if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ - Digest: "", - Size: 10, - MediaType: "application/octet-stream"}); err == nil { - t.Fatalf("expected error setting value on invalid descriptor") - } - - if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { - t.Fatalf("expected error checking for cache item with empty digest: %v", err) - } - - if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { - t.Fatalf("expected unknown blob error with empty repo: %v", err) - } -} - -func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") - expected := distribution.Descriptor{ - Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", - Size: 10, - MediaType: "application/octet-stream"} - - cache, err := provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting scoped cache: %v", err) - } - - if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("error setting descriptor: %v", err) - } - - desc, err := cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error statting fake2:abc: %v", err) - } - - if expected != desc { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // also check that we set the canonical key ("fake:abc") - desc, err = cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("descriptor not returned for canonical key: %v", err) - } - - if expected != desc { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // ensure that global gets extra descriptor mapping - desc, err = provider.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) - } - - if desc != expected { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // get at it through canonical descriptor - desc, err = provider.Stat(ctx, expected.Digest) - if err != nil { - t.Fatalf("unexpected error checking glboal descriptor: %v", err) - } 
- - if desc != expected { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - // now, we set the repo local mediatype to something else and ensure it - // doesn't get changed in the provider cache. - expected.MediaType = "application/json" - - if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("unexpected error setting descriptor: %v", err) - } - - desc, err = cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error getting descriptor: %v", err) - } - - if desc != expected { - t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) - } - - desc, err = provider.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error getting global descriptor: %v", err) - } - - expected.MediaType = "application/octet-stream" // expect original mediatype in global - - if desc != expected { - t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) - } -} - -func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { - localDigest := digest.Digest("sha384:abc") - expected := distribution.Descriptor{ - Digest: "sha256:abc", - Size: 10, - MediaType: "application/octet-stream"} - - cache, err := provider.RepositoryScoped("foo/bar") - if err != nil { - t.Fatalf("unexpected error getting scoped cache: %v", err) - } - - if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { - t.Fatalf("error setting descriptor: %v", err) - } - - desc, err := cache.Stat(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error statting fake2:abc: %v", err) - } - - if expected != desc { - t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) - } - - err = cache.Clear(ctx, localDigest) - if err != nil { - t.Fatalf("unexpected error deleting descriptor") - } - - nonExistantDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - err = cache.Clear(ctx, nonExistantDigest) - if err == nil { - t.Fatalf("expected error deleting unknown descriptor") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go index 1736756e779a..cb264b098f6b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/cache/redis/redis.go @@ -249,7 +249,7 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx cont } // Also set the values for the primary descriptor, if they differ by - // algorithm (ie sha256 vs tarsum). + // algorithm (ie sha256 vs sha512). if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { return err diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go index 7d0f84e269f4..3b13b7ad1f24 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/catalog.go @@ -10,6 +10,11 @@ import ( "github.com/docker/distribution/registry/storage/driver" ) +// ErrFinishedWalk is used when the called walk function no longer wants +// to accept any more values. 
This is used for pagination when the +// required number of repos have been found. +var ErrFinishedWalk = errors.New("finished walk") + // Returns a list, or partial list, of repositories in the registry. // Because it's a quite expensive operation, it should only be used when building up // an initial set of repositories. @@ -25,15 +30,15 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri return 0, err } - err = WalkSortedChildren(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() // lop the base path off repoPath := filePath[len(root)+1:] _, file := path.Split(repoPath) - if file == layersDirectory { - repoPath = strings.TrimSuffix(repoPath, "/"+layersDirectory) + if file == "_layers" { + repoPath = strings.TrimSuffix(repoPath, "/_layers") if repoPath > last { foundRepos = append(foundRepos, repoPath) } @@ -59,3 +64,34 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri return n, errVal } + +// Enumerate applies ingester to each repository +func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error { + repoNameBuffer := make([]string, 100) + var last string + for { + n, err := reg.Repositories(ctx, repoNameBuffer, last) + if err != nil && err != io.EOF { + return err + } + + if n == 0 { + break + } + + last = repoNameBuffer[n-1] + for i := 0; i < n; i++ { + repoName := repoNameBuffer[i] + err = ingester(repoName) + if err != nil { + return err + } + } + + if err == io.EOF { + break + } + } + return nil + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go index cbb959812dcb..b06b08764d8b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/azure.go @@ -3,6 +3,7 @@ package azure import ( + "bufio" "bytes" "fmt" "io" @@ -26,6 +27,7 @@ const ( paramAccountKey = "accountkey" paramContainer = "container" paramRealm = "realm" + maxChunkSize = 4 * 1024 * 1024 ) type driver struct { @@ -117,18 +119,21 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { return err } - if err := d.client.CreateBlockBlob(d.container, path); err != nil { + writer, err := d.Writer(ctx, path, false) + if err != nil { + return err + } + defer writer.Close() + _, err = writer.Write(contents) + if err != nil { return err } - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - _, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents)) - return err + return writer.Commit() } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err } else if !ok { @@ -153,25 +158,38 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. 
return resp, nil } -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { - if blobExists, err := d.client.BlobExists(d.container, path); err != nil { - return 0, err - } else if !blobExists { - err := d.client.CreateBlockBlob(d.container, path) +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + blobExists, err := d.client.BlobExists(d.container, path) + if err != nil { + return nil, err + } + var size int64 + if blobExists { + if append { + blobProperties, err := d.client.GetBlobProperties(d.container, path) + if err != nil { + return nil, err + } + size = blobProperties.ContentLength + } else { + err := d.client.DeleteBlob(d.container, path) + if err != nil { + return nil, err + } + } + } else { + if append { + return nil, storagedriver.PathNotFoundError{Path: path} + } + err := d.client.PutAppendBlob(d.container, path, nil) if err != nil { - return 0, err + return nil, err } } - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - bs := newAzureBlockStorage(d.client) - bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) - zw := newZeroFillWriter(&bw) - return zw.Write(d.container, path, offset, reader) + return d.newWriter(path, size), nil } // Stat retrieves the FileInfo for the given path, including the current size @@ -236,6 +254,9 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { } list := directDescendants(blobs, path) + if path != "" && len(list) == 0 { + return nil, storagedriver.PathNotFoundError{Path: path} + } return list, nil } @@ -361,6 +382,101 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { } func is404(err error) bool { - e, ok := err.(azure.AzureStorageServiceError) - return ok && e.StatusCode == http.StatusNotFound + statusCodeErr, ok := err.(azure.AzureStorageServiceError) + return ok && statusCodeErr.StatusCode == http.StatusNotFound +} + +type writer struct { + driver *driver + path string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path string, size int64) storagedriver.FileWriter { + return &writer{ + driver: d, + path: path, + size: size, + bw: bufio.NewWriterSize(&blockWriter{ + client: d.client, + container: d.container, + path: path, + }, maxChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.bw.Flush() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.client.DeleteBlob(w.driver.container, w.path) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + 
return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return w.bw.Flush() +} + +type blockWriter struct { + client azure.BlobStorageClient + container string + path string +} + +func (bw *blockWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += maxChunkSize { + chunkSize := maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize]) + if err != nil { + return n, err + } + + n += chunkSize + } + + return n, nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go deleted file mode 100644 index 1c1df899ce30..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob.go +++ /dev/null @@ -1,24 +0,0 @@ -package azure - -import ( - "fmt" - "io" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -// azureBlockStorage is adaptor between azure.BlobStorageClient and -// blockStorage interface. -type azureBlockStorage struct { - azure.BlobStorageClient -} - -func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1)) -} - -func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage { - a := azureBlockStorage{} - a.BlobStorageClient = b - return a -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go deleted file mode 100644 index 7ce4719577ff..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockblob_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package azure - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -type StorageSimulator struct { - blobs map[string]*BlockBlob -} - -type BlockBlob struct { - blocks map[string]*DataBlock - blockList []string -} - -type DataBlock struct { - data []byte - committed bool -} - -func (s *StorageSimulator) path(container, blob string) string { - return fmt.Sprintf("%s/%s", container, blob) -} - -func (s *StorageSimulator) BlobExists(container, blob string) (bool, error) { - _, ok := s.blobs[s.path(container, blob)] - return ok, nil -} - -func (s *StorageSimulator) GetBlob(container, blob string) (io.ReadCloser, error) { - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return nil, fmt.Errorf("blob not found") - } - - var readers []io.Reader - for _, bID := range bb.blockList { - readers = append(readers, bytes.NewReader(bb.blocks[bID].data)) - } - return ioutil.NopCloser(io.MultiReader(readers...)), nil -} - -func (s *StorageSimulator) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { - r, err := s.GetBlob(container, blob) - if err != nil { - return nil, err - } - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - return ioutil.NopCloser(bytes.NewReader(b[start : start+length])), nil -} - -func (s *StorageSimulator) CreateBlockBlob(container, blob string) error { - path := s.path(container, blob) - bb := &BlockBlob{ - blocks: 
make(map[string]*DataBlock), - blockList: []string{}, - } - s.blobs[path] = bb - return nil -} - -func (s *StorageSimulator) PutBlock(container, blob, blockID string, chunk []byte) error { - path := s.path(container, blob) - bb, ok := s.blobs[path] - if !ok { - return fmt.Errorf("blob not found") - } - data := make([]byte, len(chunk)) - copy(data, chunk) - bb.blocks[blockID] = &DataBlock{data: data, committed: false} // add block to blob - return nil -} - -func (s *StorageSimulator) GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) { - resp := azure.BlockListResponse{} - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return resp, fmt.Errorf("blob not found") - } - - // Iterate committed blocks (in order) - if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { - for _, blockID := range bb.blockList { - b := bb.blocks[blockID] - block := azure.BlockResponse{ - Name: blockID, - Size: int64(len(b.data)), - } - resp.CommittedBlocks = append(resp.CommittedBlocks, block) - } - - } - - // Iterate uncommitted blocks (in no order) - if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { - for blockID, b := range bb.blocks { - block := azure.BlockResponse{ - Name: blockID, - Size: int64(len(b.data)), - } - if !b.committed { - resp.UncommittedBlocks = append(resp.UncommittedBlocks, block) - } - } - } - return resp, nil -} - -func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.Block) error { - bb, ok := s.blobs[s.path(container, blob)] - if !ok { - return fmt.Errorf("blob not found") - } - - var blockIDs []string - for _, v := range blocks { - bl, ok := bb.blocks[v.ID] - if !ok { // check if block ID exists - return fmt.Errorf("Block id '%s' not found", v.ID) - } - bl.committed = true - blockIDs = append(blockIDs, v.ID) - } - - // Mark all other blocks uncommitted - for k, b := range bb.blocks { - inList := false - for _, v := range blockIDs { - if k == v { - inList = true - break - } - } - if !inList { - b.committed = false - } - } - - bb.blockList = blockIDs - return nil -} - -func NewStorageSimulator() StorageSimulator { - return StorageSimulator{ - blobs: make(map[string]*BlockBlob), - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go deleted file mode 100644 index 776c7cd593c6..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid.go +++ /dev/null @@ -1,60 +0,0 @@ -package azure - -import ( - "encoding/base64" - "fmt" - "math/rand" - "sync" - "time" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -type blockIDGenerator struct { - pool map[string]bool - r *rand.Rand - m sync.Mutex -} - -// Generate returns an unused random block id and adds the generated ID -// to list of used IDs so that the same block name is not used again. -func (b *blockIDGenerator) Generate() string { - b.m.Lock() - defer b.m.Unlock() - - var id string - for { - id = toBlockID(int(b.r.Int())) - if !b.exists(id) { - break - } - } - b.pool[id] = true - return id -} - -func (b *blockIDGenerator) exists(id string) bool { - _, used := b.pool[id] - return used -} - -func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { - b.m.Lock() - defer b.m.Unlock() - - for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) 
{ - b.pool[bl.Name] = true - } -} - -func newBlockIDGenerator() *blockIDGenerator { - return &blockIDGenerator{ - pool: make(map[string]bool), - r: rand.New(rand.NewSource(time.Now().UnixNano()))} -} - -// toBlockId converts given integer to base64-encoded block ID of a fixed length. -func toBlockID(i int) string { - s := fmt.Sprintf("%029d", i) // add zero padding for same length-blobs - return base64.StdEncoding.EncodeToString([]byte(s)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go deleted file mode 100644 index aab70202a907..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/blockid_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package azure - -import ( - "math" - "testing" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -func Test_blockIdGenerator(t *testing.T) { - r := newBlockIDGenerator() - - for i := 1; i <= 10; i++ { - if expected := i - 1; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - if id := r.Generate(); id == "" { - t.Fatal("returned empty id") - } - if expected := i; len(r.pool) != expected { - t.Fatalf("rand pool has wrong number of items: %d, expected:%d", len(r.pool), expected) - } - } -} - -func Test_blockIdGenerator_Feed(t *testing.T) { - r := newBlockIDGenerator() - if expected := 0; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed empty list - blocks := azure.BlockListResponse{} - r.Feed(blocks) - if expected := 0; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed blocks - blocks = azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"1", 1}, - {"2", 2}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"3", 3}, - }} - r.Feed(blocks) - if expected := 3; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } - - // feed same block IDs with committed/uncommitted place changed - blocks = azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"3", 3}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"1", 1}, - }} - r.Feed(blocks) - if expected := 3; len(r.pool) != expected { - t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) - } -} - -func Test_toBlockId(t *testing.T) { - min := 0 - max := math.MaxInt64 - - if len(toBlockID(min)) != len(toBlockID(max)) { - t.Fatalf("different-sized blockIDs are returned") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go deleted file mode 100644 index f18692d0b8d7..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter.go +++ /dev/null @@ -1,208 +0,0 @@ -package azure - -import ( - "fmt" - "io" - "io/ioutil" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -// blockStorage is the interface required from a block storage service -// client implementation -type blockStorage interface { - CreateBlockBlob(container, blob string) error - GetBlob(container, blob string) (io.ReadCloser, error) - 
GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) - PutBlock(container, blob, blockID string, chunk []byte) error - GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) - PutBlockList(container, blob string, blocks []azure.Block) error -} - -// randomBlobWriter enables random access semantics on Azure block blobs -// by enabling writing arbitrary length of chunks to arbitrary write offsets -// within the blob. Normally, Azure Blob Storage does not support random -// access semantics on block blobs; however, this writer can download, split and -// reupload the overlapping blocks and discards those being overwritten entirely. -type randomBlobWriter struct { - bs blockStorage - blockSize int -} - -func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter { - return randomBlobWriter{bs: bs, blockSize: blockSize} -} - -// WriteBlobAt writes the given chunk to the specified position of an existing blob. -// The offset must be equals to size of the blob or smaller than it. -func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) { - rand := newBlockIDGenerator() - - blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) - if err != nil { - return 0, err - } - rand.Feed(blocks) // load existing block IDs - - // Check for write offset for existing blob - size := getBlobSize(blocks) - if offset < 0 || offset > size { - return 0, fmt.Errorf("wrong offset for Write: %v", offset) - } - - // Upload the new chunk as blocks - blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand) - if err != nil { - return 0, err - } - - // For non-append operations, existing blocks may need to be splitted - if offset != size { - // Split the block on the left end (if any) - leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand) - if err != nil { - return 0, err - } - blockList = append(leftBlocks, blockList...) - - // Split the block on the right end (if any) - rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand) - if err != nil { - return 0, err - } - blockList = append(blockList, rightBlocks...) - } else { - // Use existing block list - var existingBlocks []azure.Block - for _, v := range blocks.CommittedBlocks { - existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - } - blockList = append(existingBlocks, blockList...) - } - // Put block list - return nn, r.bs.PutBlockList(container, blob, blockList) -} - -func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) { - blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) - if err != nil { - return 0, err - } - return getBlobSize(blocks), nil -} - -// writeChunkToBlocks writes given chunk to one or multiple blocks within specified -// blob and returns their block representations. Those blocks are not committed, yet -func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) { - var newBlocks []azure.Block - var nn int64 - - // Read chunks of at most size N except the last chunk to - // maximize block size and minimize block count. 
- buf := make([]byte, r.blockSize) - for { - n, err := io.ReadFull(chunk, buf) - if err == io.EOF { - break - } - nn += int64(n) - data := buf[:n] - blockID := rand.Generate() - if err := r.bs.PutBlock(container, blob, blockID, data); err != nil { - return newBlocks, nn, err - } - newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted}) - } - return newBlocks, nn, nil -} - -// blocksLeftSide returns the blocks that are going to be at the left side of -// the writeOffset: [0, writeOffset) by identifying blocks that will remain -// the same and splitting blocks and reuploading them as needed. -func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) { - var left []azure.Block - bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) - if err != nil { - return left, err - } - - o := writeOffset - elapsed := int64(0) - for _, v := range bx.CommittedBlocks { - blkSize := int64(v.Size) - if o >= blkSize { // use existing block - left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - o -= blkSize - elapsed += blkSize - } else if o > 0 { // current block needs to be splitted - start := elapsed - size := o - part, err := r.bs.GetSectionReader(container, blob, start, size) - if err != nil { - return left, err - } - newBlockID := rand.Generate() - - data, err := ioutil.ReadAll(part) - if err != nil { - return left, err - } - if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { - return left, err - } - left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) - break - } - } - return left, nil -} - -// blocksRightSide returns the blocks that are going to be at the right side of -// the written chunk: [writeOffset+size, +inf) by identifying blocks that will remain -// the same and splitting blocks and reuploading them as needed. 
-func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) { - var right []azure.Block - - bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) - if err != nil { - return nil, err - } - - re := writeOffset + chunkSize - 1 // right end of written chunk - var elapsed int64 - for _, v := range bx.CommittedBlocks { - var ( - bs = elapsed // left end of current block - be = elapsed + int64(v.Size) - 1 // right end of current block - ) - - if bs > re { // take the block as is - right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) - } else if be > re { // current block needs to be splitted - part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) - if err != nil { - return right, err - } - newBlockID := rand.Generate() - - data, err := ioutil.ReadAll(part) - if err != nil { - return right, err - } - if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { - return right, err - } - right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) - } - elapsed += int64(v.Size) - } - return right, nil -} - -func getBlobSize(blocks azure.BlockListResponse) int64 { - var n int64 - for _, v := range blocks.CommittedBlocks { - n += int64(v.Size) - } - return n -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go deleted file mode 100644 index 32c2509e4a15..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/randomwriter_test.go +++ /dev/null @@ -1,339 +0,0 @@ -package azure - -import ( - "bytes" - "io" - "io/ioutil" - "math/rand" - "reflect" - "strings" - "testing" - - azure "github.com/Azure/azure-sdk-for-go/storage" -) - -func TestRandomWriter_writeChunkToBlocks(t *testing.T) { - s := NewStorageSimulator() - rw := newRandomBlobWriter(&s, 3) - rand := newBlockIDGenerator() - c := []byte("AAABBBCCCD") - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, nn, err := rw.writeChunkToBlocks("a", "b", bytes.NewReader(c), rand) - if err != nil { - t.Fatal(err) - } - if expected := int64(len(c)); nn != expected { - t.Fatalf("wrong nn:%v, expected:%v", nn, expected) - } - if expected := 4; len(bw) != expected { - t.Fatal("unexpected written block count") - } - - bx, err := s.GetBlockList("a", "b", azure.BlockListTypeAll) - if err != nil { - t.Fatal(err) - } - if expected := 0; len(bx.CommittedBlocks) != expected { - t.Fatal("unexpected committed block count") - } - if expected := 4; len(bx.UncommittedBlocks) != expected { - t.Fatalf("unexpected uncommitted block count: %d -- %#v", len(bx.UncommittedBlocks), bx) - } - - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - assertBlobContents(t, r, c) -} - -func TestRandomWriter_blocksLeftSide(t *testing.T) { - blob := "AAAAABBBBBCCC" - cases := []struct { - offset int64 - expectedBlob string - expectedPattern []azure.BlockStatus - }{ - {0, "", []azure.BlockStatus{}}, // write to beginning, discard all - {13, blob, []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to end, no change - {1, "A", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // write at 1 - {5, 
"AAAAA", []azure.BlockStatus{azure.BlockStatusCommitted}}, // write just after first block - {6, "AAAAAB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block - {9, "AAAAABBBB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // write just after first block - } - - for _, c := range cases { - s := NewStorageSimulator() - rw := newRandomBlobWriter(&s, 5) - rand := newBlockIDGenerator() - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand) - if err != nil { - t.Fatal(err) - } - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - bx, err := rw.blocksLeftSide("a", "b", c.offset, rand) - if err != nil { - t.Fatal(err) - } - - bs := []azure.BlockStatus{} - for _, v := range bx { - bs = append(bs, v.Status) - } - - if !reflect.DeepEqual(bs, c.expectedPattern) { - t.Logf("Committed blocks %v", bw) - t.Fatalf("For offset %v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.expectedPattern, bs, bx) - } - if rw.bs.PutBlockList("a", "b", bx); err != nil { - t.Fatal(err) - } - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - cout, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - outBlob := string(cout) - if outBlob != c.expectedBlob { - t.Fatalf("wrong blob contents: %v, expected: %v", outBlob, c.expectedBlob) - } - } -} - -func TestRandomWriter_blocksRightSide(t *testing.T) { - blob := "AAAAABBBBBCCC" - cases := []struct { - offset int64 - size int64 - expectedBlob string - expectedPattern []azure.BlockStatus - }{ - {0, 100, "", []azure.BlockStatus{}}, // overwrite the entire blob - {0, 3, "AABBBBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // split first block - {4, 1, "BBBBBCCC", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to last char of first block - {1, 6, "BBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted}}, // overwrite splits first and second block, last block remains - {3, 8, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite a block in middle block, split end block - {10, 1, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite first byte of rightmost block - {11, 2, "", []azure.BlockStatus{}}, // overwrite the rightmost index - {13, 20, "", []azure.BlockStatus{}}, // append to the end - } - - for _, c := range cases { - s := NewStorageSimulator() - rw := newRandomBlobWriter(&s, 5) - rand := newBlockIDGenerator() - - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand) - if err != nil { - t.Fatal(err) - } - if err := rw.bs.PutBlockList("a", "b", bw); err != nil { - t.Fatal(err) - } - bx, err := rw.blocksRightSide("a", "b", c.offset, c.size, rand) - if err != nil { - t.Fatal(err) - } - - bs := []azure.BlockStatus{} - for _, v := range bx { - bs = append(bs, v.Status) - } - - if !reflect.DeepEqual(bs, c.expectedPattern) { - t.Logf("Committed blocks %v", bw) - t.Fatalf("For offset %v-size:%v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.size, c.expectedPattern, bs, bx) - } - if rw.bs.PutBlockList("a", "b", bx); err != nil { - t.Fatal(err) - } - r, err := rw.bs.GetBlob("a", "b") - if err != nil { - t.Fatal(err) - } - cout, err := 
ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - outBlob := string(cout) - if outBlob != c.expectedBlob { - t.Fatalf("For offset %v-size:%v: wrong blob contents: %v, expected: %v", c.offset, c.size, outBlob, c.expectedBlob) - } - } -} - -func TestRandomWriter_Write_NewBlob(t *testing.T) { - var ( - s = NewStorageSimulator() - rw = newRandomBlobWriter(&s, 1024*3) // 3 KB blocks - blob = randomContents(1024 * 7) // 7 KB blob - ) - if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - if _, err := rw.WriteBlobAt("a", "b", 10, bytes.NewReader(blob)); err == nil { - t.Fatal("expected error, got nil") - } - if _, err := rw.WriteBlobAt("a", "b", 100000, bytes.NewReader(blob)); err == nil { - t.Fatal("expected error, got nil") - } - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(blob)); err != nil { - t.Fatal(err) - } else if expected := int64(len(blob)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if len(bx.CommittedBlocks) != 3 { - t.Fatalf("got wrong number of committed blocks: %v", len(bx.CommittedBlocks)) - } - - // Replace first 512 bytes - leftChunk := randomContents(512) - blob = append(leftChunk, blob[512:]...) - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(leftChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(leftChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 4; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) - } - - // Replace last 512 bytes with 1024 bytes - rightChunk := randomContents(1024) - offset := int64(len(blob) - 512) - blob = append(blob[:offset], rightChunk...) - if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(rightChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(rightChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 5; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) - } - - // Replace 2K-4K (overlaps 2 blocks from L/R) - newChunk := randomContents(1024 * 2) - offset = 1024 * 2 - blob = append(append(blob[:offset], newChunk...), blob[offset+int64(len(newChunk)):]...) 
- if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(newChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(newChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, blob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 6; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) - } - - // Replace the entire blob - newBlob := randomContents(1024 * 30) - if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(newBlob)); err != nil { - t.Fatal(err) - } else if expected := int64(len(newBlob)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := rw.bs.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, newBlob) - } - if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { - t.Fatal(err) - } else if expected := 10; len(bx.CommittedBlocks) != expected { - t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) - } else if expected, size := int64(1024*30), getBlobSize(bx); size != expected { - t.Fatalf("committed block size does not indicate blob size") - } -} - -func Test_getBlobSize(t *testing.T) { - // with some committed blocks - if expected, size := int64(151), getBlobSize(azure.BlockListResponse{ - CommittedBlocks: []azure.BlockResponse{ - {"A", 100}, - {"B", 50}, - {"C", 1}, - }, - UncommittedBlocks: []azure.BlockResponse{ - {"D", 200}, - }}); expected != size { - t.Fatalf("wrong blob size: %v, expected: %v", size, expected) - } - - // with no committed blocks - if expected, size := int64(0), getBlobSize(azure.BlockListResponse{ - UncommittedBlocks: []azure.BlockResponse{ - {"A", 100}, - {"B", 50}, - {"C", 1}, - {"D", 200}, - }}); expected != size { - t.Fatalf("wrong blob size: %v, expected: %v", size, expected) - } -} - -func assertBlobContents(t *testing.T, r io.Reader, expected []byte) { - out, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(out, expected) { - t.Fatalf("wrong blob contents. size: %v, expected: %v", len(out), len(expected)) - } -} - -func randomContents(length int64) []byte { - b := make([]byte, length) - for i := range b { - b[i] = byte(rand.Intn(2 << 8)) - } - return b -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go deleted file mode 100644 index 095489d224f8..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter.go +++ /dev/null @@ -1,49 +0,0 @@ -package azure - -import ( - "bytes" - "io" -) - -type blockBlobWriter interface { - GetSize(container, blob string) (int64, error) - WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) -} - -// zeroFillWriter enables writing to an offset outside a block blob's size -// by offering the chunk to the underlying writer as a contiguous data with -// the gap in between filled with NUL (zero) bytes. 
-type zeroFillWriter struct { - blockBlobWriter -} - -func newZeroFillWriter(b blockBlobWriter) zeroFillWriter { - w := zeroFillWriter{} - w.blockBlobWriter = b - return w -} - -// Write writes the given chunk to the specified existing blob even though -// offset is out of blob's size. The gaps are filled with zeros. Returned -// written number count does not include zeros written. -func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) { - size, err := z.blockBlobWriter.GetSize(container, blob) - if err != nil { - return 0, err - } - - var reader io.Reader - var zeroPadding int64 - if offset <= size { - reader = chunk - } else { - zeroPadding = offset - size - offset = size // adjust offset to be the append index - zeros := bytes.NewReader(make([]byte, zeroPadding)) - reader = io.MultiReader(zeros, chunk) - } - - nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader) - nn -= zeroPadding - return nn, err -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go deleted file mode 100644 index 49361791ad34..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/azure/zerofillwriter_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package azure - -import ( - "bytes" - "testing" -) - -func Test_zeroFillWrite_AppendNoGap(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*1) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024*3 + 512) - if nn, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(firstChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - secondChunk := randomContents(256) - if nn, err := zw.Write("a", "b", int64(len(firstChunk)), bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(firstChunk, secondChunk...)) - } - -} - -func Test_zeroFillWrite_StartWithGap(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - chunk := randomContents(1024 * 5) - padding := int64(1024*2 + 256) - if nn, err := zw.Write("a", "b", padding, bytes.NewReader(chunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(chunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(make([]byte, padding), chunk...)) - } -} - -func Test_zeroFillWrite_AppendWithGap(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024*3 + 512) - if _, err := zw.Write("a", "b", 0, 
bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - secondChunk := randomContents(256) - padding := int64(1024 * 4) - if nn, err := zw.Write("a", "b", int64(len(firstChunk))+padding, bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(firstChunk, append(make([]byte, padding), secondChunk...)...)) - } -} - -func Test_zeroFillWrite_LiesWithinSize(t *testing.T) { - s := NewStorageSimulator() - bw := newRandomBlobWriter(&s, 1024*2) - zw := newZeroFillWriter(&bw) - if err := s.CreateBlockBlob("a", "b"); err != nil { - t.Fatal(err) - } - - firstChunk := randomContents(1024 * 3) - if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { - t.Fatal(err) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, firstChunk) - } - - // in this case, zerofill won't be used - secondChunk := randomContents(256) - if nn, err := zw.Write("a", "b", 0, bytes.NewReader(secondChunk)); err != nil { - t.Fatal(err) - } else if expected := int64(len(secondChunk)); expected != nn { - t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) - } - if out, err := s.GetBlob("a", "b"); err != nil { - t.Fatal(err) - } else { - assertBlobContents(t, out, append(secondChunk, firstChunk[len(secondChunk):]...)) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go index c816d2d6f7c2..064bda60fdb2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/base.go @@ -102,10 +102,10 @@ func (base *Base) PutContent(ctx context.Context, path string, content []byte) e return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) } -// ReadStream wraps ReadStream of underlying storage driver. -func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +// Reader wraps Reader of underlying storage driver. +func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { ctx, done := context.WithTrace(ctx) - defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) + defer done("%s.Reader(%q, %d)", base.Name(), path, offset) if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} @@ -115,25 +115,21 @@ func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - rc, e := base.StorageDriver.ReadStream(ctx, path, offset) + rc, e := base.StorageDriver.Reader(ctx, path, offset) return rc, base.setDriverName(e) } -// WriteStream wraps WriteStream of underlying storage driver. -func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { +// Writer wraps Writer of underlying storage driver. 
+func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { ctx, done := context.WithTrace(ctx) - defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) - - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} - } + defer done("%s.Writer(%q, %v)", base.Name(), path, append) if !storagedriver.PathRegexp.MatchString(path) { - return 0, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - i64, e := base.StorageDriver.WriteStream(ctx, path, offset, reader) - return i64, base.setDriverName(e) + writer, e := base.StorageDriver.Writer(ctx, path, append) + return writer, base.setDriverName(e) } // Stat wraps Stat of underlying storage driver. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/regulator.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/regulator.go new file mode 100644 index 000000000000..185160a4be4f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/base/regulator.go @@ -0,0 +1,145 @@ +package base + +import ( + "io" + "sync" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" +) + +type regulator struct { + storagedriver.StorageDriver + *sync.Cond + + available uint64 +} + +// NewRegulator wraps the given driver and is used to regulate concurrent calls +// to the given storage driver to a maximum of the given limit. This is useful +// for storage drivers that would otherwise create an unbounded number of OS +// threads if allowed to be called unregulated. +func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver { + return ®ulator{ + StorageDriver: driver, + Cond: sync.NewCond(&sync.Mutex{}), + available: limit, + } +} + +func (r *regulator) enter() { + r.L.Lock() + for r.available == 0 { + r.Wait() + } + r.available-- + r.L.Unlock() +} + +func (r *regulator) exit() { + r.L.Lock() + // We only need to signal to a waiting FS operation if we're already at the + // limit of threads used + if r.available == 0 { + r.Signal() + } + r.available++ + r.L.Unlock() +} + +// Name returns the human-readable "name" of the driver, useful in error +// messages and logging. By convention, this will just be the registration +// name, but drivers may provide other information here. +func (r *regulator) Name() string { + r.enter() + defer r.exit() + + return r.StorageDriver.Name() +} + +// GetContent retrieves the content stored at "path" as a []byte. +// This should primarily be used for small objects. +func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.GetContent(ctx, path) +} + +// PutContent stores the []byte content at a location designated by "path". +// This should primarily be used for small objects. +func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error { + r.enter() + defer r.exit() + + return r.StorageDriver.PutContent(ctx, path, content) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" +// with a given byte offset. +// May be used to resume reading a stream by providing a nonzero offset. 
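(Editorial note: the regulator added above throttles concurrent driver calls with a sync.Cond acting as a counting semaphore. The following is a minimal standalone sketch of that enter/exit pattern; the names limiter, newLimiter, and doWork-style usage are illustrative only and are not part of the vendored code.)

package main

import (
	"fmt"
	"sync"
	"time"
)

// limiter mirrors the regulator's shape: an embedded condition variable plus
// a count of available slots.
type limiter struct {
	*sync.Cond
	available uint64
}

func newLimiter(limit uint64) *limiter {
	return &limiter{Cond: sync.NewCond(&sync.Mutex{}), available: limit}
}

// enter blocks until a slot is free, then takes it.
func (l *limiter) enter() {
	l.L.Lock()
	for l.available == 0 {
		l.Wait() // sleep until exit() signals
	}
	l.available--
	l.L.Unlock()
}

// exit releases a slot, waking one waiter only if we were at the limit.
func (l *limiter) exit() {
	l.L.Lock()
	if l.available == 0 {
		l.Signal()
	}
	l.available++
	l.L.Unlock()
}

func main() {
	l := newLimiter(2) // allow at most two concurrent "filesystem" operations
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			l.enter()
			defer l.exit()
			fmt.Println("operation", n, "running")
			time.Sleep(10 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}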
+func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Reader(ctx, path, offset) +} + +// Writer stores the contents of the provided io.ReadCloser at a +// location designated by the given path. +// May be used to resume writing a stream by providing a nonzero offset. +// The offset must be no larger than the CurrentSize for this path. +func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Writer(ctx, path, append) +} + +// Stat retrieves the FileInfo for the given path, including the current +// size in bytes and the creation time. +func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.Stat(ctx, path) +} + +// List returns a list of the objects that are direct descendants of the +//given path. +func (r *regulator) List(ctx context.Context, path string) ([]string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.List(ctx, path) +} + +// Move moves an object stored at sourcePath to destPath, removing the +// original object. +// Note: This may be no more efficient than a copy followed by a delete for +// many implementations. +func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Move(ctx, sourcePath, destPath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (r *regulator) Delete(ctx context.Context, path string) error { + r.enter() + defer r.exit() + + return r.StorageDriver.Delete(ctx, path) +} + +// URLFor returns a URL which may be used to retrieve the content stored at +// the given path, possibly using the given options. +// May return an ErrUnsupportedMethod in certain StorageDriver +// implementations. +func (r *regulator) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + r.enter() + defer r.exit() + + return r.StorageDriver.URLFor(ctx, path, options) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go index e84f0026bdcd..a9c04ec59352 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/factory/factory.go @@ -11,7 +11,14 @@ import ( var driverFactories = make(map[string]StorageDriverFactory) // StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces -// Storage drivers should call Register() with a factory to make the driver available by name +// Storage drivers should call Register() with a factory to make the driver available by name. +// Individual StorageDriver implementations generally register with the factory via the Register +// func (below) in their init() funcs, and as such they should be imported anonymously before use. 
+// See below for an example of how to register and get a StorageDriver for S3 +// +// import _ "github.com/docker/distribution/registry/storage/driver/s3-aws" +// s3Driver, err = factory.Create("s3", storageParams) +// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams type StorageDriverFactory interface { // Create returns a new storagedriver.StorageDriver with the given parameters // Parameters will vary by driver and may be ignored @@ -21,6 +28,8 @@ type StorageDriverFactory interface { // Register makes a storage driver available by the provided name. // If Register is called twice with the same name or if driver factory is nil, it panics. +// Additionally, it is not concurrency safe. Most Storage Drivers call this function +// in their init() functions. See the documentation for StorageDriverFactory for more. func Register(name string, factory StorageDriverFactory) { if factory == nil { panic("Must not provide nil StorageDriverFactory") diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go index 480bd6873a38..649e2bc23483 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver.go @@ -1,12 +1,15 @@ package filesystem import ( + "bufio" "bytes" "fmt" "io" "io/ioutil" "os" "path" + "reflect" + "strconv" "time" "github.com/docker/distribution/context" @@ -15,8 +18,23 @@ import ( "github.com/docker/distribution/registry/storage/driver/factory" ) -const driverName = "filesystem" -const defaultRootDirectory = "/var/lib/registry" +const ( + driverName = "filesystem" + defaultRootDirectory = "/var/lib/registry" + defaultMaxThreads = uint64(100) + + // minThreads is the minimum value for the maxthreads configuration + // parameter. 
If the driver's parameters are less than this we set + // the parameters to minThreads + minThreads = uint64(25) +) + +// DriverParameters represents all configuration options available for the +// filesystem driver +type DriverParameters struct { + RootDirectory string + MaxThreads uint64 +} func init() { factory.Register(driverName, &filesystemDriverFactory{}) @@ -26,7 +44,7 @@ func init() { type filesystemDriverFactory struct{} func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters), nil + return FromParameters(parameters) } type driver struct { @@ -46,25 +64,72 @@ type Driver struct { // FromParameters constructs a new Driver with a given parameters map // Optional Parameters: // - rootdirectory -func FromParameters(parameters map[string]interface{}) *Driver { - var rootDirectory = defaultRootDirectory +// - maxthreads +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + params, err := fromParametersImpl(parameters) + if err != nil || params == nil { + return nil, err + } + return New(*params), nil +} + +func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) { + var ( + err error + maxThreads = defaultMaxThreads + rootDirectory = defaultRootDirectory + ) + if parameters != nil { - rootDir, ok := parameters["rootdirectory"] - if ok { + if rootDir, ok := parameters["rootdirectory"]; ok { rootDirectory = fmt.Sprint(rootDir) } + + // Get maximum number of threads for blocking filesystem operations, + // if specified + threads := parameters["maxthreads"] + switch v := threads.(type) { + case string: + if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil { + return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads) + } + case uint64: + maxThreads = v + case int, int32, int64: + val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int() + // If threads is negative casting to uint64 will wrap around and + // give you the hugest thread limit ever. Let's be sensible, here + if val > 0 { + maxThreads = uint64(val) + } + case uint, uint32: + maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads) + } + + if maxThreads < minThreads { + maxThreads = minThreads + } + } + + params := &DriverParameters{ + RootDirectory: rootDirectory, + MaxThreads: maxThreads, } - return New(rootDirectory) + return params, nil } // New constructs a new Driver with a given rootDirectory -func New(rootDirectory string) *Driver { +func New(params DriverParameters) *Driver { + fsDriver := &driver{rootDirectory: params.RootDirectory} + return &Driver{ baseEmbed: baseEmbed{ Base: base.Base{ - StorageDriver: &driver{ - rootDirectory: rootDirectory, - }, + StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads), }, }, } @@ -78,7 +143,7 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(ctx, path, 0) + rc, err := d.Reader(ctx, path, 0) if err != nil { return nil, err } @@ -94,16 +159,22 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". 
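(Editorial note: with this change the filesystem driver's FromParameters returns (*Driver, error) and gains a "maxthreads" option. A short sketch of constructing the driver through the new signature follows; the parameter values and the panic-based error handling are illustrative. "maxthreads" may be supplied as a string or an integer, non-numeric strings produce an error, and values below the driver's minimum are raised to that minimum.)

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage/driver/filesystem"
)

func main() {
	driver, err := filesystem.FromParameters(map[string]interface{}{
		"rootdirectory": "/var/lib/registry", // default root shown in the diff
		"maxthreads":    50,                  // caps concurrent blocking filesystem calls
	})
	if err != nil {
		// e.g. a non-numeric "maxthreads" string is rejected here
		panic(err)
	}
	fmt.Println("filesystem driver ready:", driver.Name())
}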
func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { - if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { + writer, err := d.Writer(ctx, subPath, false) + if err != nil { return err } - - return os.Truncate(d.fullPath(subPath), int64(len(contents))) + defer writer.Close() + _, err = io.Copy(writer, bytes.NewReader(contents)) + if err != nil { + writer.Cancel() + return err + } + return writer.Commit() } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) if err != nil { if os.IsNotExist(err) { @@ -125,40 +196,36 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return file, nil } -// WriteStream stores the contents of the provided io.Reader at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { - // TODO(stevvooe): This needs to be a requirement. - // if !path.IsAbs(subPath) { - // return fmt.Errorf("absolute path required: %q", subPath) - // } - +func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) { fullPath := d.fullPath(subPath) parentDir := path.Dir(fullPath) - if err := os.MkdirAll(parentDir, 0755); err != nil { - return 0, err + if err := os.MkdirAll(parentDir, 0777); err != nil { + return nil, err } - fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644) + fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { - // TODO(stevvooe): A few missing conditions in storage driver: - // 1. What if the path is already a directory? - // 2. Should number 1 be exposed explicitly in storagedriver? - // 2. Can this path not exist, even if we create above? 
- return 0, err + return nil, err } - defer fp.Close() - nn, err = fp.Seek(offset, os.SEEK_SET) - if err != nil { - return 0, err - } + var offset int64 - if nn != offset { - return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) + if !append { + err := fp.Truncate(0) + if err != nil { + fp.Close() + return nil, err + } + } else { + n, err := fp.Seek(0, os.SEEK_END) + if err != nil { + fp.Close() + return nil, err + } + offset = int64(n) } - return io.Copy(fp, reader) + return newFileWriter(fp, offset), nil } // Stat retrieves the FileInfo for the given path, including the current size @@ -286,3 +353,88 @@ func (fi fileInfo) ModTime() time.Time { func (fi fileInfo) IsDir() bool { return fi.FileInfo.IsDir() } + +type fileWriter struct { + file *os.File + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func newFileWriter(file *os.File, size int64) *fileWriter { + return &fileWriter{ + file: file, + size: size, + bw: bufio.NewWriter(file), + } +} + +func (fw *fileWriter) Write(p []byte) (int, error) { + if fw.closed { + return 0, fmt.Errorf("already closed") + } else if fw.committed { + return 0, fmt.Errorf("already committed") + } else if fw.cancelled { + return 0, fmt.Errorf("already cancelled") + } + n, err := fw.bw.Write(p) + fw.size += int64(n) + return n, err +} + +func (fw *fileWriter) Size() int64 { + return fw.size +} + +func (fw *fileWriter) Close() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + if err := fw.file.Close(); err != nil { + return err + } + fw.closed = true + return nil +} + +func (fw *fileWriter) Cancel() error { + if fw.closed { + return fmt.Errorf("already closed") + } + + fw.cancelled = true + fw.file.Close() + return os.Remove(fw.file.Name()) +} + +func (fw *fileWriter) Commit() error { + if fw.closed { + return fmt.Errorf("already closed") + } else if fw.committed { + return fmt.Errorf("already committed") + } else if fw.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := fw.bw.Flush(); err != nil { + return err + } + + if err := fw.file.Sync(); err != nil { + return err + } + + fw.committed = true + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go index 8b48b4312139..3be859239b80 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/filesystem/driver_test.go @@ -3,6 +3,7 @@ package filesystem import ( "io/ioutil" "os" + "reflect" "testing" storagedriver "github.com/docker/distribution/registry/storage/driver" @@ -20,7 +21,93 @@ func init() { } defer os.Remove(root) + driver, err := FromParameters(map[string]interface{}{ + "rootdirectory": root, + }) + if err != nil { + panic(err) + } + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return New(root), nil + return driver, nil }, testsuites.NeverSkip) } + +func TestFromParametersImpl(t *testing.T) { + + tests := []struct { + params map[string]interface{} // techincally the yaml can contain anything + expected DriverParameters + pass bool + }{ + // check we use default threads and root dirs + { + params: map[string]interface{}{}, + expected: 
DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: defaultMaxThreads, + }, + pass: true, + }, + // Testing initiation with a string maxThreads which can't be parsed + { + params: map[string]interface{}{ + "maxthreads": "fail", + }, + expected: DriverParameters{}, + pass: false, + }, + { + params: map[string]interface{}{ + "maxthreads": "100", + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: uint64(100), + }, + pass: true, + }, + { + params: map[string]interface{}{ + "maxthreads": 100, + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: uint64(100), + }, + pass: true, + }, + // check that we use minimum thread counts + { + params: map[string]interface{}{ + "maxthreads": 1, + }, + expected: DriverParameters{ + RootDirectory: defaultRootDirectory, + MaxThreads: minThreads, + }, + pass: true, + }, + } + + for _, item := range tests { + params, err := fromParametersImpl(item.params) + + if !item.pass { + // We only need to assert that expected failures have an error + if err == nil { + t.Fatalf("expected error configuring filesystem driver with invalid param: %+v", item.params) + } + continue + } + + if err != nil { + t.Fatalf("unexpected error creating filesystem driver: %s", err) + } + // Note that we get a pointer to params back + if !reflect.DeepEqual(*params, item.expected) { + t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params) + } + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go index 4cef972cb690..abe0b9f68f8b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs.go @@ -7,11 +7,8 @@ // Because gcs is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // -// Keep in mind that gcs guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. 
+// Note that the contents of incomplete uploads are not accessible even though +// Stat returns their length // // +build include_gcs @@ -25,33 +22,51 @@ import ( "math/rand" "net/http" "net/url" + "reflect" + "regexp" "sort" + "strconv" "strings" "time" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" - + "golang.org/x/oauth2/jwt" "google.golang.org/api/googleapi" - storageapi "google.golang.org/api/storage/v1" "google.golang.org/cloud" "google.golang.org/cloud/storage" + "github.com/Sirupsen/logrus" + ctx "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" ) -const driverName = "gcs" -const dummyProjectID = "" +const ( + driverName = "gcs" + dummyProjectID = "" + + uploadSessionContentType = "application/x-docker-upload-session" + minChunkSize = 256 * 1024 + defaultChunkSize = 20 * minChunkSize + + maxTries = 5 +) -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +var rangeHeader = regexp.MustCompile(`^bytes=([0-9])+-([0-9]+)$`) + +// driverParameters is a struct that encapsulates all of the driver parameters after all values have been set type driverParameters struct { bucket string - keyfile string + config *jwt.Config + email string + privateKey []byte + client *http.Client rootDirectory string + chunkSize int } func init() { @@ -74,31 +89,75 @@ type driver struct { email string privateKey []byte rootDirectory string + chunkSize int } // FromParameters constructs a new Driver with a given parameters map // Required parameters: // - bucket func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - bucket, ok := parameters["bucket"] if !ok || fmt.Sprint(bucket) == "" { return nil, fmt.Errorf("No bucket parameter provided") } - keyfile, ok := parameters["keyfile"] - if !ok { - keyfile = "" - } - rootDirectory, ok := parameters["rootdirectory"] if !ok { rootDirectory = "" } + + chunkSize := defaultChunkSize + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int, uint, int32, uint32, uint64, int64: + chunkSize = int(reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()) + default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + if chunkSize%minChunkSize != 0 { + return nil, fmt.Errorf("chunksize should be a multiple of %d", minChunkSize) + } + } + + var ts oauth2.TokenSource + jwtConf := new(jwt.Config) + if keyfile, ok := parameters["keyfile"]; ok { + jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile)) + if err != nil { + return nil, err + } + jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) + if err != nil { + return nil, err + } + ts = jwtConf.TokenSource(context.Background()) + } else { + var err error + ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) + if err != nil { + return nil, err + } + } + params := driverParameters{ - fmt.Sprint(bucket), - fmt.Sprint(keyfile), - 
fmt.Sprint(rootDirectory), + bucket: fmt.Sprint(bucket), + rootDirectory: fmt.Sprint(rootDirectory), + email: jwtConf.Email, + privateKey: jwtConf.PrivateKey, + client: oauth2.NewClient(context.Background(), ts), + chunkSize: chunkSize, } return New(params) @@ -106,42 +165,22 @@ func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri // New constructs a new driver func New(params driverParameters) (storagedriver.StorageDriver, error) { - var ts oauth2.TokenSource - var err error rootDirectory := strings.Trim(params.rootDirectory, "/") if rootDirectory != "" { rootDirectory += "/" } + if params.chunkSize <= 0 || params.chunkSize%minChunkSize != 0 { + return nil, fmt.Errorf("Invalid chunksize: %d is not a positive multiple of %d", params.chunkSize, minChunkSize) + } d := &driver{ bucket: params.bucket, rootDirectory: rootDirectory, + email: params.email, + privateKey: params.privateKey, + client: params.client, + chunkSize: params.chunkSize, } - if params.keyfile == "" { - ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) - if err != nil { - return nil, err - } - } else { - jsonKey, err := ioutil.ReadFile(params.keyfile) - if err != nil { - return nil, err - } - conf, err := google.JWTConfigFromJSON( - jsonKey, - storage.ScopeFullControl, - ) - if err != nil { - return nil, err - } - ts = conf.TokenSource(context.Background()) - d.email = conf.Email - d.privateKey = conf.PrivateKey - } - client := oauth2.NewClient(context.Background(), ts) - d.client = client - if err != nil { - return nil, err - } + return &base.Base{ StorageDriver: d, }, nil @@ -156,7 +195,17 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. // This should primarily be used for small objects. func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(context, path, 0) + gcsContext := d.context(context) + name := d.pathToKey(path) + var rc io.ReadCloser + err := retry(func() error { + var err error + rc, err = storage.NewReader(gcsContext, d.bucket, name) + return err + }) + if err == storage.ErrObjectNotExist { + return nil, storagedriver.PathNotFoundError{Path: path} + } if err != nil { return nil, err } @@ -172,25 +221,53 @@ func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". // This should primarily be used for small objects. func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { - wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) - wc.ContentType = "application/octet-stream" - defer wc.Close() - _, err := wc.Write(contents) - return err + return retry(func() error { + wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) + wc.ContentType = "application/octet-stream" + return putContentsClose(wc, contents) + }) } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" +// Reader retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. 
-func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { - name := d.pathToKey(path) +func (d *driver) Reader(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { + res, err := getObject(d.client, d.bucket, d.pathToKey(path), offset) + if err != nil { + if res != nil { + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { + res.Body.Close() + obj, err := storageStatObject(d.context(context), d.bucket, d.pathToKey(path)) + if err != nil { + return nil, err + } + if offset == int64(obj.Size) { + return ioutil.NopCloser(bytes.NewReader([]byte{})), nil + } + return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} + } + } + return nil, err + } + if res.Header.Get("Content-Type") == uploadSessionContentType { + defer res.Body.Close() + return nil, storagedriver.PathNotFoundError{Path: path} + } + return res.Body, nil +} + +func getObject(client *http.Client, bucket string, name string, offset int64) (*http.Response, error) { // copied from google.golang.org/cloud/storage#NewReader : // to set the additional "Range" header u := &url.URL{ Scheme: "https", Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", d.bucket, name), + Path: fmt.Sprintf("/%s/%s", bucket, name), } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { @@ -199,132 +276,263 @@ func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io. if offset > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) } - res, err := d.client.Do(req) + var res *http.Response + err = retry(func() error { + var err error + res, err = client.Do(req) + return err + }) if err != nil { return nil, err } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return nil, storagedriver.PathNotFoundError{Path: path} + return res, googleapi.CheckMediaResponse(res) +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(context ctx.Context, path string, append bool) (storagedriver.FileWriter, error) { + writer := &writer{ + client: d.client, + bucket: d.bucket, + name: d.pathToKey(path), + buffer: make([]byte, d.chunkSize), } - if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { - res.Body.Close() - obj, err := storage.StatObject(d.context(context), d.bucket, name) + + if append { + err := writer.init(path) if err != nil { return nil, err } - if offset == int64(obj.Size) { - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - } - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - if res.StatusCode < 200 || res.StatusCode > 299 { - res.Body.Close() - return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", d.bucket, name, res.Status) } - return res.Body, nil + return writer, nil } -// WriteStream stores the contents of the provided io.ReadCloser at a -// location designated by the given path. -// May be used to resume writing a stream by providing a nonzero offset. -// The offset must be no larger than the CurrentSize for this path. 
-func (d *driver) WriteStream(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} +type writer struct { + client *http.Client + bucket string + name string + size int64 + offset int64 + closed bool + sessionURI string + buffer []byte + buffSize int +} + +// Cancel removes any written content from this FileWriter. +func (w *writer) Cancel() error { + err := w.checkClosed() + if err != nil { + return err } + w.closed = true + err = storageDeleteObject(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + if err != nil { + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + } + return err +} - if offset == 0 { - return d.writeCompletely(context, path, 0, reader) +func (w *writer) Close() error { + if w.closed { + return nil } + w.closed = true - service, err := storageapi.New(d.client) + err := w.writeChunk() if err != nil { - return 0, err - } - objService := storageapi.NewObjectsService(service) - var obj *storageapi.Object - err = retry(5, func() error { - o, err := objService.Get(d.bucket, d.pathToKey(path)).Do() - obj = o return err - }) - // obj, err := retry(5, objService.Get(d.bucket, d.pathToKey(path)).Do) - if err != nil { - return 0, err } - // cannot append more chunks, so redo from scratch - if obj.ComponentCount >= 1023 { - return d.writeCompletely(context, path, offset, reader) + // Copy the remaining bytes from the buffer to the upload session + // Normally buffSize will be smaller than minChunkSize. However, in the + // unlikely event that the upload session failed to start, this number could be higher. + // In this case we can safely clip the remaining bytes to the minChunkSize + if w.buffSize > minChunkSize { + w.buffSize = minChunkSize } - // skip from reader - objSize := int64(obj.Size) - nn, err := skip(reader, objSize-offset) + // commit the writes by updating the upload session + err = retry(func() error { + wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + wc.ContentType = uploadSessionContentType + wc.Metadata = map[string]string{ + "Session-URI": w.sessionURI, + "Offset": strconv.FormatInt(w.offset, 10), + } + return putContentsClose(wc, w.buffer[0:w.buffSize]) + }) if err != nil { - return nn, err + return err } + w.size = w.offset + int64(w.buffSize) + w.buffSize = 0 + return nil +} - // Size <= offset - partName := fmt.Sprintf("%v#part-%d#", d.pathToKey(path), obj.ComponentCount) - gcsContext := d.context(context) - wc := storage.NewWriter(gcsContext, d.bucket, partName) - wc.ContentType = "application/octet-stream" - - if objSize < offset { - err = writeZeros(wc, offset-objSize) +func putContentsClose(wc *storage.Writer, contents []byte) error { + size := len(contents) + var nn int + var err error + for nn < size { + n, err := wc.Write(contents[nn:size]) + nn += n if err != nil { - wc.CloseWithError(err) - return nn, err + break } } - n, err := io.Copy(wc, reader) if err != nil { wc.CloseWithError(err) - return nn, err + return err } - err = wc.Close() - if err != nil { - return nn, err + return wc.Close() +} + +// Commit flushes all content written to this FileWriter and makes it +// available for future calls to StorageDriver.GetContent and +// StorageDriver.Reader. 
+func (w *writer) Commit() error { + + if err := w.checkClosed(); err != nil { + return err } - // wc was closed succesfully, so the temporary part exists, schedule it for deletion at the end - // of the function - defer storage.DeleteObject(gcsContext, d.bucket, partName) + w.closed = true - req := &storageapi.ComposeRequest{ - Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType}, - SourceObjects: []*storageapi.ComposeRequestSourceObjects{ - { - Name: obj.Name, - Generation: obj.Generation, - }, { - Name: partName, - Generation: wc.Object().Generation, - }}, + // no session started yet just perform a simple upload + if w.sessionURI == "" { + err := retry(func() error { + wc := storage.NewWriter(cloud.NewContext(dummyProjectID, w.client), w.bucket, w.name) + wc.ContentType = "application/octet-stream" + return putContentsClose(wc, w.buffer[0:w.buffSize]) + }) + if err != nil { + return err + } + w.size = w.offset + int64(w.buffSize) + w.buffSize = 0 + return nil } + size := w.offset + int64(w.buffSize) + var nn int + // loop must be performed at least once to ensure the file is committed even when + // the buffer is empty + for { + n, err := putChunk(w.client, w.sessionURI, w.buffer[nn:w.buffSize], w.offset, size) + nn += int(n) + w.offset += n + w.size = w.offset + if err != nil { + w.buffSize = copy(w.buffer, w.buffer[nn:w.buffSize]) + return err + } + if nn == w.buffSize { + break + } + } + w.buffSize = 0 + return nil +} - err = retry(5, func() error { _, err := objService.Compose(d.bucket, obj.Name, req).Do(); return err }) - if err == nil { - nn = nn + n +func (w *writer) checkClosed() error { + if w.closed { + return fmt.Errorf("Writer already closed") + } + return nil +} + +func (w *writer) writeChunk() error { + var err error + // chunks can be uploaded only in multiples of minChunkSize + // chunkSize is a multiple of minChunkSize less than or equal to buffSize + chunkSize := w.buffSize - (w.buffSize % minChunkSize) + if chunkSize == 0 { + return nil + } + // if there is no sessionURI yet, obtain one by starting the session + if w.sessionURI == "" { + w.sessionURI, err = startSession(w.client, w.bucket, w.name) + } + if err != nil { + return err } + nn, err := putChunk(w.client, w.sessionURI, w.buffer[0:chunkSize], w.offset, -1) + w.offset += nn + if w.offset > w.size { + w.size = w.offset + } + // shift the remaining bytes to the start of the buffer + w.buffSize = copy(w.buffer, w.buffer[int(nn):w.buffSize]) + return err +} + +func (w *writer) Write(p []byte) (int, error) { + err := w.checkClosed() + if err != nil { + return 0, err + } + + var nn int + for nn < len(p) { + n := copy(w.buffer[w.buffSize:], p[nn:]) + w.buffSize += n + if w.buffSize == cap(w.buffer) { + err = w.writeChunk() + if err != nil { + break + } + } + nn += n + } return nn, err } +// Size returns the number of bytes written to this FileWriter. 
+func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) init(path string) error { + res, err := getObject(w.client, w.bucket, w.name, 0) + if err != nil { + return err + } + defer res.Body.Close() + if res.Header.Get("Content-Type") != uploadSessionContentType { + return storagedriver.PathNotFoundError{Path: path} + } + offset, err := strconv.ParseInt(res.Header.Get("X-Goog-Meta-Offset"), 10, 64) + if err != nil { + return err + } + buffer, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + w.sessionURI = res.Header.Get("X-Goog-Meta-Session-URI") + w.buffSize = copy(w.buffer, buffer) + w.offset = offset + w.size = offset + int64(w.buffSize) + return nil +} + type request func() error -func retry(maxTries int, req request) error { +func retry(req request) error { backoff := time.Second var err error for i := 0; i < maxTries; i++ { - err := req() + err = req() if err == nil { return nil } - status := err.(*googleapi.Error) - if status == nil || (status.Code != 429 && status.Code < http.StatusInternalServerError) { + status, ok := err.(*googleapi.Error) + if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { return err } @@ -336,61 +544,17 @@ func retry(maxTries int, req request) error { return err } -func (d *driver) writeCompletely(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) - wc.ContentType = "application/octet-stream" - defer wc.Close() - - // Copy the first offset bytes of the existing contents - // (padded with zeros if needed) into the writer - if offset > 0 { - existing, err := d.ReadStream(context, path, 0) - if err != nil { - return 0, err - } - defer existing.Close() - n, err := io.CopyN(wc, existing, offset) - if err == io.EOF { - err = writeZeros(wc, offset-n) - } - if err != nil { - return 0, err - } - } - return io.Copy(wc, reader) -} - -func skip(reader io.Reader, count int64) (int64, error) { - if count <= 0 { - return 0, nil - } - return io.CopyN(ioutil.Discard, reader, count) -} - -func writeZeros(wc io.Writer, count int64) error { - buf := make([]byte, 32*1024) - for count > 0 { - size := cap(buf) - if int64(size) > count { - size = int(count) - } - n, err := wc.Write(buf[0:size]) - if err != nil { - return err - } - count = count - int64(n) - } - return nil -} - // Stat retrieves the FileInfo for the given path, including the current // size in bytes and the creation time. 
func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { var fi storagedriver.FileInfoFields //try to get as file gcsContext := d.context(context) - obj, err := storage.StatObject(gcsContext, d.bucket, d.pathToKey(path)) + obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) if err == nil { + if obj.ContentType == uploadSessionContentType { + return nil, storagedriver.PathNotFoundError{Path: path} + } fi = storagedriver.FileInfoFields{ Path: path, Size: obj.Size, @@ -407,7 +571,7 @@ func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, query.Prefix = dirpath query.MaxResults = 1 - objects, err := storage.ListObjects(gcsContext, d.bucket, query) + objects, err := storageListObjects(gcsContext, d.bucket, query) if err != nil { return nil, err } @@ -435,21 +599,16 @@ func (d *driver) List(context ctx.Context, path string) ([]string, error) { query.Prefix = d.pathToDirKey(path) list := make([]string, 0, 64) for { - objects, err := storage.ListObjects(d.context(context), d.bucket, query) + objects, err := storageListObjects(d.context(context), d.bucket, query) if err != nil { return nil, err } for _, object := range objects.Results { // GCS does not guarantee strong consistency between - // DELETE and LIST operationsCheck that the object is not deleted, - // so filter out any objects with a non-zero time-deleted - if object.Deleted.IsZero() { - name := object.Name - // Ignore objects with names that end with '#' (these are uploaded parts) - if name[len(name)-1] != '#' { - name = d.keyToPath(name) - list = append(list, name) - } + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted + if object.Deleted.IsZero() && object.ContentType != uploadSessionContentType { + list = append(list, d.keyToPath(object.Name)) } } for _, subpath := range objects.Prefixes { @@ -461,58 +620,34 @@ func (d *driver) List(context ctx.Context, path string) ([]string, error) { break } } + if path != "/" && len(list) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in Google Cloud Storage. + return nil, storagedriver.PathNotFoundError{Path: path} + } return list, nil } // Move moves an object stored at sourcePath to destPath, removing the // original object. 
func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { - prefix := d.pathToDirKey(sourcePath) gcsContext := d.context(context) - keys, err := d.listAll(gcsContext, prefix) + _, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) if err != nil { - return err - } - if len(keys) > 0 { - destPrefix := d.pathToDirKey(destPath) - copies := make([]string, 0, len(keys)) - sort.Strings(keys) - var err error - for _, key := range keys { - dest := destPrefix + key[len(prefix):] - _, err = storage.CopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil) - if err == nil { - copies = append(copies, dest) - } else { - break - } - } - // if an error occurred, attempt to cleanup the copies made - if err != nil { - for i := len(copies) - 1; i >= 0; i-- { - _ = storage.DeleteObject(gcsContext, d.bucket, copies[i]) - } - return err - } - // delete originals - for i := len(keys) - 1; i >= 0; i-- { - err2 := storage.DeleteObject(gcsContext, d.bucket, keys[i]) - if err2 != nil { - err = err2 - } - } - return err - } - _, err = storage.CopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) - if err != nil { - if status := err.(*googleapi.Error); status != nil { + if status, ok := err.(*googleapi.Error); ok { if status.Code == http.StatusNotFound { return storagedriver.PathNotFoundError{Path: sourcePath} } } return err } - return storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) + // if deleting the file fails, log the error, but do not fail; the file was successfully copied, + // and the original should eventually be cleaned when purging the uploads folder. + if err != nil { + logrus.Infof("error deleting file: %v due to %v", sourcePath, err) + } + return nil } // listAll recursively lists all names of objects stored at "prefix" and its subpaths. @@ -522,14 +657,14 @@ func (d *driver) listAll(context context.Context, prefix string) ([]string, erro query.Prefix = prefix query.Versions = false for { - objects, err := storage.ListObjects(d.context(context), d.bucket, query) + objects, err := storageListObjects(d.context(context), d.bucket, query) if err != nil { return nil, err } for _, obj := range objects.Results { // GCS does not guarantee strong consistency between - // DELETE and LIST operationsCheck that the object is not deleted, - // so filter out any objects with a non-zero time-deleted + // DELETE and LIST operations. Check that the object is not deleted, + // and filter out any objects with a non-zero time-deleted if obj.Deleted.IsZero() { list = append(list, obj.Name) } @@ -553,15 +688,24 @@ func (d *driver) Delete(context ctx.Context, path string) error { if len(keys) > 0 { sort.Sort(sort.Reverse(sort.StringSlice(keys))) for _, key := range keys { - if err := storage.DeleteObject(gcsContext, d.bucket, key); err != nil { + err := storageDeleteObject(gcsContext, d.bucket, key) + // GCS only guarantees eventual consistency, so listAll might return + // paths that no longer exist. 
If this happens, just ignore any not + // found error + if status, ok := err.(*googleapi.Error); ok { + if status.Code == http.StatusNotFound { + err = nil + } + } + if err != nil { return err } } return nil } - err = storage.DeleteObject(gcsContext, d.bucket, d.pathToKey(path)) + err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) if err != nil { - if status := err.(*googleapi.Error); status != nil { + if status, ok := err.(*googleapi.Error); ok { if status.Code == http.StatusNotFound { return storagedriver.PathNotFoundError{Path: path} } @@ -570,6 +714,42 @@ func (d *driver) Delete(context ctx.Context, path string) error { return err } +func storageDeleteObject(context context.Context, bucket string, name string) error { + return retry(func() error { + return storage.DeleteObject(context, bucket, name) + }) +} + +func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { + var obj *storage.Object + err := retry(func() error { + var err error + obj, err = storage.StatObject(context, bucket, name) + return err + }) + return obj, err +} + +func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { + var objs *storage.Objects + err := retry(func() error { + var err error + objs, err = storage.ListObjects(context, bucket, q) + return err + }) + return objs, err +} + +func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { + var obj *storage.Object + err := retry(func() error { + var err error + obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs) + return err + }) + return obj, err +} + // URLFor returns a URL which may be used to retrieve the content stored at // the given path, possibly using the given options. 
// Returns ErrUnsupportedMethod if this driver has no privateKey @@ -606,6 +786,80 @@ func (d *driver) URLFor(context ctx.Context, path string, options map[string]int return storage.SignedURL(d.bucket, name, opts) } +func startSession(client *http.Client, bucket string, name string) (uri string, err error) { + u := &url.URL{ + Scheme: "https", + Host: "www.googleapis.com", + Path: fmt.Sprintf("/upload/storage/v1/b/%v/o", bucket), + RawQuery: fmt.Sprintf("uploadType=resumable&name=%v", name), + } + err = retry(func() error { + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return err + } + req.Header.Set("X-Upload-Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", "0") + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + uri = resp.Header.Get("Location") + return nil + }) + return uri, err +} + +func putChunk(client *http.Client, sessionURI string, chunk []byte, from int64, totalSize int64) (int64, error) { + bytesPut := int64(0) + err := retry(func() error { + req, err := http.NewRequest("PUT", sessionURI, bytes.NewReader(chunk)) + if err != nil { + return err + } + length := int64(len(chunk)) + to := from + length - 1 + size := "*" + if totalSize >= 0 { + size = strconv.FormatInt(totalSize, 10) + } + req.Header.Set("Content-Type", "application/octet-stream") + if from == to+1 { + req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", size)) + } else { + req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", from, to, size)) + } + req.Header.Set("Content-Length", strconv.FormatInt(length, 10)) + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if totalSize < 0 && resp.StatusCode == 308 { + groups := rangeHeader.FindStringSubmatch(resp.Header.Get("Range")) + end, err := strconv.ParseInt(groups[2], 10, 64) + if err != nil { + return err + } + bytesPut = end - from + 1 + return nil + } + err = googleapi.CheckMediaResponse(resp) + if err != nil { + return err + } + bytesPut = to - from + 1 + return nil + }) + return bytesPut, err +} + func (d *driver) context(context ctx.Context) context.Context { return cloud.WithContext(context, dummyProjectID, d.client) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go index 7afc4e70916b..f2808d5fc2b4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/gcs/gcs_test.go @@ -7,10 +7,14 @@ import ( "os" "testing" + "fmt" ctx "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" - + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + "google.golang.org/cloud/storage" "gopkg.in/check.v1" ) @@ -22,39 +26,199 @@ var skipGCS func() string func init() { bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") - keyfile := os.Getenv("REGISTRY_STORAGE_GCS_KEYFILE") credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + // Skip GCS storage driver tests if environment variable parameters are not provided + skipGCS = func() string { + if bucket == "" || credentials == "" { + return "The following 
environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS" + } + return "" + } + + if skipGCS() != "" { + return + } + root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) } defer os.Remove(root) + var ts oauth2.TokenSource + var email string + var privateKey []byte - gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { + ts, err = google.DefaultTokenSource(ctx.Background(), storage.ScopeFullControl) + if err != nil { + // Assume that the file contents are within the environment variable since it exists + // but does not contain a valid file path + jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) + if err != nil { + panic(fmt.Sprintf("Error reading JWT config : %s", err)) + } + email = jwtConfig.Email + privateKey = []byte(jwtConfig.PrivateKey) + if len(privateKey) == 0 { + panic("Error reading JWT config : missing private_key property") + } + if email == "" { + panic("Error reading JWT config : missing client_email property") + } + ts = jwtConfig.TokenSource(ctx.Background()) + } + gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { parameters := driverParameters{ - bucket, - keyfile, - rootDirectory, + bucket: bucket, + rootDirectory: root, + email: email, + privateKey: privateKey, + client: oauth2.NewClient(ctx.Background(), ts), + chunkSize: defaultChunkSize, } return New(parameters) } - // Skip GCS storage driver tests if environment variable parameters are not provided - skipGCS = func() string { - if bucket == "" || (credentials == "" && keyfile == "") { - return "Must set REGISTRY_STORAGE_GCS_BUCKET and (GOOGLE_APPLICATION_CREDENTIALS or REGISTRY_STORAGE_GCS_KEYFILE) to run GCS tests" - } - return "" - } - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return gcsDriverConstructor(root) }, skipGCS) } +// Test Committing a FileWriter without having called Write +func TestCommitEmpty(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := ctx.Background() + + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != 0 { + t.Fatalf("writer.Size: %d != 0", writer.Size()) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != 0 { + t.Fatalf("len(driver.GetContent(..)): %d != 0", len(readContents)) + } +} + +// Test Committing a FileWriter after having written exactly +// defaultChunksize bytes. 
+func TestCommit(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + filename := "/test" + ctx := ctx.Background() + + contents := make([]byte, defaultChunkSize) + writer, err := driver.Writer(ctx, filename, false) + defer driver.Delete(ctx, filename) + if err != nil { + t.Fatalf("driver.Writer: unexpected error: %v", err) + } + _, err = writer.Write(contents) + if err != nil { + t.Fatalf("writer.Write: unexpected error: %v", err) + } + err = writer.Commit() + if err != nil { + t.Fatalf("writer.Commit: unexpected error: %v", err) + } + err = writer.Close() + if err != nil { + t.Fatalf("writer.Close: unexpected error: %v", err) + } + if writer.Size() != int64(len(contents)) { + t.Fatalf("writer.Size: %d != %d", writer.Size(), len(contents)) + } + readContents, err := driver.GetContent(ctx, filename) + if err != nil { + t.Fatalf("driver.GetContent: unexpected error: %v", err) + } + if len(readContents) != len(contents) { + t.Fatalf("len(driver.GetContent(..)): %d != %d", len(readContents), len(contents)) + } +} + +func TestRetry(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + assertError := func(expected string, observed error) { + observedMsg := "" + if observed != nil { + observedMsg = observed.Error() + } + if observedMsg != expected { + t.Fatalf("expected %v, observed %v\n", expected, observedMsg) + } + } + + err := retry(func() error { + return &googleapi.Error{ + Code: 503, + Message: "google api error", + } + }) + assertError("googleapi: Error 503: google api error", err) + + err = retry(func() error { + return &googleapi.Error{ + Code: 404, + Message: "google api error", + } + }) + assertError("googleapi: Error 404: google api error", err) + + err = retry(func() error { + return fmt.Errorf("error") + }) + assertError("error", err) +} + func TestEmptyRootList(t *testing.T) { if skipGCS() != "" { t.Skip(skipGCS()) @@ -88,8 +252,12 @@ func TestEmptyRootList(t *testing.T) { if err != nil { t.Fatalf("unexpected error creating content: %v", err) } - defer rootedDriver.Delete(ctx, filename) - + defer func() { + err := rootedDriver.Delete(ctx, filename) + if err != nil { + t.Fatalf("failed to remove %v due to %v\n", filename, err) + } + }() keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { @@ -104,3 +272,40 @@ func TestEmptyRootList(t *testing.T) { } } } + +// TestMoveDirectory checks that moving a directory returns an error. +func TestMoveDirectory(t *testing.T) { + if skipGCS() != "" { + t.Skip(skipGCS()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + driver, err := gcsDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + ctx := ctx.Background() + contents := []byte("contents") + // Create a regular file. 
+ err = driver.PutContent(ctx, "/parent/dir/foo", contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer func() { + err := driver.Delete(ctx, "/parent") + if err != nil { + t.Fatalf("failed to remove /parent due to %v\n", err) + } + }() + + err = driver.Move(ctx, "/parent/dir", "/parent/other") + if err == nil { + t.Fatalf("Moving directory /parent/dir to /parent/other should have returned a non-nil error\n") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go index b5735c0ac407..eb2fd1cf42de 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/inmemory/driver.go @@ -1,7 +1,6 @@ package inmemory import ( - "bytes" "fmt" "io" "io/ioutil" @@ -74,7 +73,7 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { d.mutex.RLock() defer d.mutex.RUnlock() - rc, err := d.ReadStream(ctx, path, 0) + rc, err := d.Reader(ctx, path, 0) if err != nil { return nil, err } @@ -88,7 +87,9 @@ func (d *driver) PutContent(ctx context.Context, p string, contents []byte) erro d.mutex.Lock() defer d.mutex.Unlock() - f, err := d.root.mkfile(p) + normalized := normalize(p) + + f, err := d.root.mkfile(normalized) if err != nil { // TODO(stevvooe): Again, we need to clarify when this is not a // directory in StorageDriver API. @@ -101,9 +102,9 @@ func (d *driver) PutContent(ctx context.Context, p string, contents []byte) erro return nil } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { d.mutex.RLock() defer d.mutex.RUnlock() @@ -111,10 +112,10 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } - path = normalize(path) - found := d.root.find(path) + normalized := normalize(path) + found := d.root.find(normalized) - if found.path() != path { + if found.path() != normalized { return nil, storagedriver.PathNotFoundError{Path: path} } @@ -125,46 +126,24 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil } -// WriteStream stores the contents of the provided io.ReadCloser at a location -// designated by the given path. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. 
+func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { d.mutex.Lock() defer d.mutex.Unlock() - if offset < 0 { - return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - normalized := normalize(path) f, err := d.root.mkfile(normalized) if err != nil { - return 0, fmt.Errorf("not a file") + return nil, fmt.Errorf("not a file") } - // Unlock while we are reading from the source, in case we are reading - // from the same mfs instance. This can be fixed by a more granular - // locking model. - d.mutex.Unlock() - d.mutex.RLock() // Take the readlock to block other writers. - var buf bytes.Buffer - - nn, err = buf.ReadFrom(reader) - if err != nil { - // TODO(stevvooe): This condition is odd and we may need to clarify: - // we've read nn bytes from reader but have written nothing to the - // backend. What is the correct return value? Really, the caller needs - // to know that the reader has been advanced and reattempting the - // operation is incorrect. - d.mutex.RUnlock() - d.mutex.Lock() - return nn, err + if !append { + f.truncate() } - d.mutex.RUnlock() - d.mutex.Lock() - f.WriteAt(buf.Bytes(), offset) - return nn, err + return d.newWriter(f), nil } // Stat returns info about the provided path. @@ -173,7 +152,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, defer d.mutex.RUnlock() normalized := normalize(path) - found := d.root.find(path) + found := d.root.find(normalized) if found.path() != normalized { return nil, storagedriver.PathNotFoundError{Path: path} @@ -260,3 +239,74 @@ func (d *driver) Delete(ctx context.Context, path string) error { func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod{} } + +type writer struct { + d *driver + f *file + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(f *file) storagedriver.FileWriter { + return &writer{ + d: d, + f: f, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.f.WriteAt(p, int64(len(w.f.data))) +} + +func (w *writer) Size() int64 { + w.d.mutex.RLock() + defer w.d.mutex.RUnlock() + + return int64(len(w.f.data)) +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + + w.d.mutex.Lock() + defer w.d.mutex.Unlock() + + return w.d.root.delete(w.f.path()) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + w.committed = true + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go index 31c00afc8df5..9162c09de624 100644 --- 
a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/middleware/cloudfront/middleware.go @@ -8,9 +8,11 @@ import ( "encoding/pem" "fmt" "io/ioutil" + "net/url" + "strings" "time" - "github.com/AdRoll/goamz/cloudfront" + "github.com/aws/aws-sdk-go/service/cloudfront/sign" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" @@ -21,8 +23,9 @@ import ( // then issues HTTP Temporary Redirects to this CloudFront content URL. type cloudFrontStorageMiddleware struct { storagedriver.StorageDriver - cloudfront *cloudfront.CloudFront - duration time.Duration + urlSigner *sign.URLSigner + baseURL string + duration time.Duration } var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} @@ -33,15 +36,24 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { base, ok := options["baseurl"] if !ok { - return nil, fmt.Errorf("No baseurl provided") + return nil, fmt.Errorf("no baseurl provided") } baseURL, ok := base.(string) if !ok { return nil, fmt.Errorf("baseurl must be a string") } + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + if !strings.HasSuffix(baseURL, "/") { + baseURL += "/" + } + if _, err := url.Parse(baseURL); err != nil { + return nil, fmt.Errorf("invalid baseurl: %v", err) + } pk, ok := options["privatekey"] if !ok { - return nil, fmt.Errorf("No privatekey provided") + return nil, fmt.Errorf("no privatekey provided") } pkPath, ok := pk.(string) if !ok { @@ -49,7 +61,7 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o } kpid, ok := options["keypairid"] if !ok { - return nil, fmt.Errorf("No keypairid provided") + return nil, fmt.Errorf("no keypairid provided") } keypairID, ok := kpid.(string) if !ok { @@ -58,19 +70,19 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o pkBytes, err := ioutil.ReadFile(pkPath) if err != nil { - return nil, fmt.Errorf("Failed to read privatekey file: %s", err) + return nil, fmt.Errorf("failed to read privatekey file: %s", err) } block, _ := pem.Decode([]byte(pkBytes)) if block == nil { - return nil, fmt.Errorf("Failed to decode private key as an rsa private key") + return nil, fmt.Errorf("failed to decode private key as an rsa private key") } privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) if err != nil { return nil, err } - cf := cloudfront.New(baseURL, privateKey, keypairID) + urlSigner := sign.NewURLSigner(keypairID, privateKey) duration := 20 * time.Minute d, ok := options["duration"] @@ -81,13 +93,18 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o case string: dur, err := time.ParseDuration(d) if err != nil { - return nil, fmt.Errorf("Invalid duration: %s", err) + return nil, fmt.Errorf("invalid duration: %s", err) } duration = dur } } - return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil + return &cloudFrontStorageMiddleware{ + StorageDriver: storageDriver, + urlSigner: urlSigner, + baseURL: baseURL, + duration: duration, + }, nil } // S3BucketKeyer is any type that is capable of returning the S3 bucket 
key @@ -106,7 +123,7 @@ func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, return lh.StorageDriver.URLFor(ctx, path, options) } - cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) + cfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration)) if err != nil { return "", err } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go index c6e4f8a32763..7ae70334608d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/oss/oss.go @@ -20,7 +20,6 @@ import ( "reflect" "strconv" "strings" - "sync" "time" "github.com/docker/distribution/context" @@ -39,6 +38,7 @@ const driverName = "oss" const minChunkSize = 5 << 20 const defaultChunkSize = 2 * minChunkSize +const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk // listMax is the largest amount of objects you can request from OSS in a list call const listMax = 1000 @@ -74,9 +74,6 @@ type driver struct { ChunkSize int64 Encrypt bool RootDirectory string - - pool sync.Pool // pool []byte buffers used for WriteStream - zeros []byte // shared, zero-valued buffer used for WriteStream } type baseEmbed struct { @@ -98,8 +95,7 @@ type Driver struct { // - encrypt func FromParameters(parameters map[string]interface{}) (*Driver, error) { // Providing no values for these is valid in case the user is authenticating - // with an IAM on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) + accessKey, ok := parameters["accesskeyid"] if !ok { return nil, fmt.Errorf("No accesskeyid parameter provided") @@ -195,13 +191,14 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return New(params) } -// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and // bucketName func New(params DriverParameters) (*Driver, error) { client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) client.SetEndpoint(params.Endpoint) bucket := client.Bucket(params.Bucket) + client.SetDebug(false) // Validate that the given credentials have at least read permissions in the // given bucket scope. @@ -218,11 +215,6 @@ func New(params DriverParameters) (*Driver, error) { ChunkSize: params.ChunkSize, Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, - zeros: make([]byte, params.ChunkSize), - } - - d.pool.New = func() interface{} { - return make([]byte, d.ChunkSize) } return &Driver{ @@ -254,9 +246,9 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. 
-func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(http.Header) headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") @@ -277,343 +269,37 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return resp.Body, nil } -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - partNumber := 1 - bytesRead := 0 - var putErrChan chan error - parts := []oss.Part{} - var part oss.Part - done := make(chan struct{}) // stopgap to free up waiting goroutines - - multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions()) - if err != nil { - return 0, err - } - - buf := d.getbuf() - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. - defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } - } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. 
+func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.ossPath(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) if err != nil { - return err - } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - + return nil, err } - return nil + return d.newWriter(key, multi, nil), nil } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the OSS - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying OSS library should handle it, it doesn't seem to - // be part of the shouldRetry function (see denverdino/aliyungo/oss). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. - case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - var err error - var part oss.Part - - loop: - for retries := 0; retries < 5; retries++ { - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if err == nil { - break // success! - } - - // NOTE(stevvooe): This retry code tries to only retry under - // conditions where the OSS package does not. We may add oss - // error codes to the below if we see others bubble up in the - // application. Right now, the most troubling is - // RequestTimeout, which seems to only triggered when a tcp - // connection to OSS slows to a crawl. If the RequestTimeout - // ends up getting added to the OSS library and we don't see - // other errors, this retry loop can be removed. - switch err := err.(type) { - case *oss.Error: - switch err.Code { - case "RequestTimeout": - // allow retries on only this error. - default: - break loop - } - } - - backoff := 100 * time.Millisecond * time.Duration(retries+1) - logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) - time.Sleep(backoff) - } - - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially. 
- parts = append(parts, part) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil + multis, _, err := d.Bucket.ListMulti(key, "") + if err != nil { + return nil, parseError(path, err) } - - if offset > 0 { - resp, err := d.Bucket.Head(d.ossPath(path), nil) - if err != nil { - if ossErr, ok := err.(*oss.Error); !ok || ossErr.Code != "NoSuchKey" { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Path(d.ossPath(path))) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - oss.CopyOptions{}, - d.Bucket.Path(d.ossPath(path))) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % 
d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - + for _, multi := range multis { + if key != multi.Key { + continue } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err + parts, err := multi.ListParts() + if err != nil { + return nil, parseError(path, err) } - - if int64(bytesRead) < d.ChunkSize { - break + var multiSize int64 + for _, part := range parts { + multiSize += part.Size } + return d.newWriter(key, multi, parts), nil } - - return totalRead, nil + return nil, storagedriver.PathNotFoundError{Path: path} } // Stat retrieves the FileInfo for the given path, including the current size @@ -706,15 +392,14 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - logrus.Infof("Move from %s to %s", d.Bucket.Path("/"+d.ossPath(sourcePath)), d.ossPath(destPath)) - /* This is terrible, but aws doesn't have an actual move. */ - _, err := d.Bucket.PutCopy(d.ossPath(destPath), getPermissions(), - oss.CopyOptions{ - //Options: d.getOptions(), - //ContentType: d.getContentType() - }, - d.Bucket.Path(d.ossPath(sourcePath))) + logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) + + err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath), + d.getContentType(), + getPermissions(), + oss.Options{}) if err != nil { + logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) return parseError(sourcePath, err) } @@ -756,13 +441,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int method, ok := options["method"] if ok { methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { + if !ok || (methodString != "GET") { return "", storagedriver.ErrUnsupportedMethod{} } } expiresTime := time.Now().Add(20 * time.Minute) - logrus.Infof("expiresTime: %d", expiresTime) expires, ok := options["expiry"] if ok { @@ -771,23 +455,18 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int expiresTime = et } } - logrus.Infof("expiresTime: %d", expiresTime) - testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) - logrus.Infof("testURL: %s", testURL) - return testURL, nil + logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime) + signedURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) + logrus.Infof("signed URL: %s", signedURL) + return signedURL, nil } func (d *driver) ossPath(path string) string { return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") } -// S3BucketKey returns the OSS bucket key for the given storage driver path. 
-func (d *Driver) S3BucketKey(path string) string { - return d.StorageDriver.(*driver).ossPath(path) -} - func parseError(path string, err error) error { - if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" { + if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") { return storagedriver.PathNotFoundError{Path: path} } @@ -811,12 +490,181 @@ func (d *driver) getContentType() string { return "application/octet-stream" } -// getbuf returns a buffer from the driver's pool with length d.ChunkSize. -func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. +type writer struct { + driver *driver + key string + multi *oss.Multi + parts []oss.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key string, multi *oss.Multi, parts []oss.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += part.Size + } + return &writer{ + driver: d, + key: key, + multi: multi, + parts: parts, + size: size, + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { + err := w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return 0, err + } + + multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) + if err != nil { + return 0, err + } + w.multi = multi + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + contents, err := w.driver.Bucket.Get(w.key) + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart = contents + } else { + // Otherwise we can use the old file as the new first part + _, part, err := multi.PutPartCopy(1, oss.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + if err != nil { + return 0, err + } + w.parts = []oss.Part{part} + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) 
+ n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + err := w.multi.Abort() + return err } -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + err = w.multi.Complete(w.parts) + if err != nil { + w.multi.Abort() + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) + w.pendingPart = nil + } + + part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) + if err != nil { + return err + } + w.parts = append(w.parts, part) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go deleted file mode 100644 index 655c68a33db8..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package rados implements the rados storage driver backend. Support can be -// enabled by including the "include_rados" build tag. 
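The writer/flushPart logic added above for the OSS driver (and repeated later in this diff for the s3-aws driver) keeps two buffers, readyPart and pendingPart, so that a short tail can be folded into the previous buffer and every uploaded part except possibly the last stays at least one chunk long. Below is a minimal, self-contained sketch of that idea, not part of the patch: the sketchWriter type, the tiny chunkSize, and the in-memory parts slice are all invented for illustration, and the real driver uploads via multi.PutPart with a 5 MB-plus chunk size.

```go
// Illustrative sketch only: mimics the readyPart/pendingPart flush strategy
// with an in-memory "part store" and a deliberately tiny chunk size.
package main

import "fmt"

const chunkSize = 4 // illustrative only; the real driver uses d.ChunkSize (>= 5 MB)

type sketchWriter struct {
	parts       [][]byte // stands in for the multipart upload's committed parts
	readyPart   []byte
	pendingPart []byte
}

func (w *sketchWriter) Write(p []byte) int {
	n := 0
	for len(p) > 0 {
		// Fill readyPart first.
		if need := chunkSize - len(w.readyPart); need > 0 {
			take := min(need, len(p))
			w.readyPart = append(w.readyPart, p[:take]...)
			p, n = p[take:], n+take
			continue
		}
		// Then fill pendingPart; once both buffers are full, flush readyPart as a part.
		if need := chunkSize - len(w.pendingPart); need > 0 {
			take := min(need, len(p))
			w.pendingPart = append(w.pendingPart, p[:take]...)
			p, n = p[take:], n+take
			if len(w.pendingPart) == chunkSize {
				w.flush()
			}
		}
	}
	return n
}

// flush mirrors flushPart: a short pendingPart is folded into readyPart so the
// upload never ends with a tiny standalone part unless the data itself is tiny.
func (w *sketchWriter) flush() {
	if len(w.readyPart) == 0 && len(w.pendingPart) == 0 {
		return
	}
	if len(w.pendingPart) < chunkSize {
		w.readyPart = append(w.readyPart, w.pendingPart...)
		w.pendingPart = nil
	}
	w.parts = append(w.parts, w.readyPart)
	w.readyPart, w.pendingPart = w.pendingPart, nil
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	w := &sketchWriter{}
	w.Write([]byte("0123456789abc")) // 13 bytes against a 4-byte chunk size
	w.flush()                        // what Close/Commit would trigger
	for i, part := range w.parts {
		fmt.Printf("part %d: %q (len %d)\n", i+1, part, len(part))
	}
}
```

With this input the sketch emits two full-size parts and one 5-byte final part, which is the property the comment on the writer type describes: the last part absorbs the short remainder instead of becoming an undersized part of its own.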
-package rados diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go deleted file mode 100644 index c2be528e64da..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados.go +++ /dev/null @@ -1,632 +0,0 @@ -// +build include_rados - -package rados - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "path" - "strconv" - - log "github.com/Sirupsen/logrus" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/distribution/uuid" - "github.com/noahdesu/go-ceph/rados" -) - -const driverName = "rados" - -// Prefix all the stored blob -const objectBlobPrefix = "blob:" - -// Stripes objects size to 4M -const defaultChunkSize = 4 << 20 -const defaultXattrTotalSizeName = "total-size" - -// Max number of keys fetched from omap at each read operation -const defaultKeysFetched = 1 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - poolname string - username string - chunksize uint64 -} - -func init() { - factory.Register(driverName, &radosDriverFactory{}) -} - -// radosDriverFactory implements the factory.StorageDriverFactory interface -type radosDriverFactory struct{} - -func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - Conn *rados.Conn - Ioctx *rados.IOContext - chunksize uint64 -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS -// Objects are stored at absolute keys in the provided bucket. 
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - poolname: the ceph pool name -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - - pool, ok := parameters["poolname"] - if !ok { - return nil, fmt.Errorf("No poolname parameter provided") - } - - username, ok := parameters["username"] - if !ok { - username = "" - } - - chunksize := uint64(defaultChunkSize) - chunksizeParam, ok := parameters["chunksize"] - if ok { - chunksize, ok = chunksizeParam.(uint64) - if !ok { - return nil, fmt.Errorf("The chunksize parameter should be a number") - } - } - - params := DriverParameters{ - fmt.Sprint(pool), - fmt.Sprint(username), - chunksize, - } - - return New(params) -} - -// New constructs a new Driver -func New(params DriverParameters) (*Driver, error) { - var conn *rados.Conn - var err error - - if params.username != "" { - log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) - conn, err = rados.NewConnWithUser(params.username) - } else { - log.Infof("Opening connection to pool %s", params.poolname) - conn, err = rados.NewConn() - } - - if err != nil { - return nil, err - } - - err = conn.ReadDefaultConfigFile() - if err != nil { - return nil, err - } - - err = conn.Connect() - if err != nil { - return nil, err - } - - log.Infof("Connected") - - ioctx, err := conn.OpenIOContext(params.poolname) - - log.Infof("Connected to pool %s", params.poolname) - - if err != nil { - return nil, err - } - - d := &driver{ - Ioctx: ioctx, - Conn: conn, - chunksize: params.chunksize, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - rc, err := d.ReadStream(ctx, path, 0) - if err != nil { - return nil, err - } - defer rc.Close() - - p, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - return p, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { - return err - } - - return nil -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
-type readStreamReader struct { - driver *driver - oid string - size uint64 - offset uint64 -} - -func (r *readStreamReader) Read(b []byte) (n int, err error) { - // Determine the part available to read - bufferOffset := uint64(0) - bufferSize := uint64(len(b)) - - // End of the object, read less than the buffer size - if bufferSize > r.size-r.offset { - bufferSize = r.size - r.offset - } - - // Fill `b` - for bufferOffset < bufferSize { - // Get the offset in the object chunk - chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset) - - // Determine the best size to read - bufferEndOffset := bufferSize - if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset { - bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset) - } - - // Read the chunk - n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset) - - if err != nil { - return int(bufferOffset), err - } - - bufferOffset += uint64(n) - r.offset += uint64(n) - } - - // EOF if the offset is at the end of the object - if r.offset == r.size { - return int(bufferOffset), io.EOF - } - - return int(bufferOffset), nil -} - -func (r *readStreamReader) Close() error { - return nil -} - -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - // get oid from filename - oid, err := d.getOid(path) - - if err != nil { - return nil, err - } - - // get object stat - stat, err := d.Stat(ctx, path) - - if err != nil { - return nil, err - } - - if offset > stat.Size() { - return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} - } - - return &readStreamReader{ - driver: d, - oid: oid, - size: uint64(stat.Size()), - offset: uint64(offset), - }, nil -} - -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - buf := make([]byte, d.chunksize) - totalRead = 0 - - oid, err := d.getOid(path) - if err != nil { - switch err.(type) { - // Trying to write new object, generate new blob identifier for it - case storagedriver.PathNotFoundError: - oid = d.generateOid() - err = d.putOid(path, oid) - if err != nil { - return 0, err - } - default: - return 0, err - } - } else { - // Check total object size only for existing ones - totalSize, err := d.getXattrTotalSize(ctx, oid) - if err != nil { - return 0, err - } - - // If offset if after the current object size, fill the gap with zeros - for totalSize < uint64(offset) { - sizeToWrite := d.chunksize - if totalSize-uint64(offset) < sizeToWrite { - sizeToWrite = totalSize - uint64(offset) - } - - chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize)) - err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset)) - if err != nil { - return totalRead, err - } - - totalSize += sizeToWrite - } - } - - // Writer - for { - // Align to chunk size - sizeRead := uint64(0) - sizeToRead := uint64(offset+totalRead) % d.chunksize - if sizeToRead == 0 { - sizeToRead = d.chunksize - } - - // Read from `reader` - for sizeRead < sizeToRead { - nn, err := reader.Read(buf[sizeRead:sizeToRead]) - sizeRead += uint64(nn) - - if err != nil { - if err != io.EOF { - return totalRead, err - } - - break - } - } - - // End of file and nothing was read - if sizeRead == 0 { - break - } - - // Write chunk object - chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead)) - err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset)) - - if err != nil { - return totalRead, err - } 
- - // Update total object size as xattr in the first chunk of the object - err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) - if err != nil { - return totalRead, err - } - - totalRead += int64(sizeRead) - - // End of file - if sizeRead < sizeToRead { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - // get oid from filename - oid, err := d.getOid(path) - - if err != nil { - return nil, err - } - - // the path is a virtual directory? - if oid == "" { - return storagedriver.FileInfoInternal{ - FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: 0, - IsDir: true, - }, - }, nil - } - - // stat first chunk - stat, err := d.Ioctx.Stat(oid + "-0") - - if err != nil { - return nil, err - } - - // get total size of chunked object - totalSize, err := d.getXattrTotalSize(ctx, oid) - - if err != nil { - return nil, err - } - - return storagedriver.FileInfoInternal{ - FileInfoFields: storagedriver.FileInfoFields{ - Path: path, - Size: int64(totalSize), - ModTime: stat.ModTime, - }, - }, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { - files, err := d.listDirectoryOid(dirPath) - - if err != nil { - return nil, storagedriver.PathNotFoundError{Path: dirPath} - } - - keys := make([]string, 0, len(files)) - for k := range files { - if k != dirPath { - keys = append(keys, path.Join(dirPath, k)) - } - } - - return keys, nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - // Get oid - oid, err := d.getOid(sourcePath) - - if err != nil { - return err - } - - // Move reference - err = d.putOid(destPath, oid) - - if err != nil { - return err - } - - // Delete old reference - err = d.deleteOid(sourcePath) - - if err != nil { - return err - } - - return nil -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, objectPath string) error { - // Get oid - oid, err := d.getOid(objectPath) - - if err != nil { - return err - } - - // Deleting virtual directory - if oid == "" { - objects, err := d.listDirectoryOid(objectPath) - if err != nil { - return err - } - - for object := range objects { - err = d.Delete(ctx, path.Join(objectPath, object)) - if err != nil { - return err - } - } - } else { - // Delete object chunks - totalSize, err := d.getXattrTotalSize(ctx, oid) - - if err != nil { - return err - } - - for offset := uint64(0); offset < totalSize; offset += d.chunksize { - chunkName, _ := d.getChunkNameFromOffset(oid, offset) - - err = d.Ioctx.Delete(chunkName) - if err != nil { - return err - } - } - - // Delete reference - err = d.deleteOid(objectPath) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - return "", storagedriver.ErrUnsupportedMethod{} -} - -// Generate a blob identifier -func (d *driver) generateOid() string { - return objectBlobPrefix + uuid.Generate().String() -} - -// Reference a object and its hierarchy -func (d *driver) putOid(objectPath string, oid string) error { - directory := path.Dir(objectPath) - base := path.Base(objectPath) - createParentReference := true - - // After creating this reference, skip the parents referencing since the - // hierarchy already exists - if oid == "" { - firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) - if (err == nil) && (len(firstReference) > 0) { - createParentReference = false - } - } - - oids := map[string][]byte{ - base: []byte(oid), - } - - // Reference object - err := d.Ioctx.SetOmap(directory, oids) - if err != nil { - return err - } - - // Esure parent virtual directories - if createParentReference { - return d.putOid(directory, "") - } - - return nil -} - -// Get the object identifier from an object name -func (d *driver) getOid(objectPath string) (string, error) { - directory := path.Dir(objectPath) - base := path.Base(objectPath) - - files, err := d.Ioctx.GetOmapValues(directory, "", base, 1) - - if (err != nil) || (files[base] == nil) { - return "", storagedriver.PathNotFoundError{Path: objectPath} - } - - return string(files[base]), nil -} - -// List the objects of a virtual directory -func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) { - return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched) -} - -// Remove a file from the files hierarchy -func (d *driver) deleteOid(objectPath string) error { - // Remove object reference - directory := path.Dir(objectPath) - base := path.Base(objectPath) - err := d.Ioctx.RmOmapKeys(directory, []string{base}) - - if err != nil { - return err - } - - // Remove virtual directory if empty (no more references) - firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) - - if err != nil { - return err - } - - if len(firstReference) == 0 { - // Delete omap - err := d.Ioctx.Delete(directory) - - if err != nil { - return err - } - - // Remove reference on parent omaps - if directory != "" { - return d.deleteOid(directory) - } - } - - return nil -} - -// Takes an offset in an chunked object and return the chunk name and a new -// offset in this chunk object -func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) { - chunkID := offset / d.chunksize - chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10) - chunkedOffset := offset % d.chunksize - return chunkedOid, chunkedOffset -} - -// Set the total size of a chunked object `oid` -func (d *driver) setXattrTotalSize(oid string, size uint64) error { - // Convert uint64 `size` to []byte - xattr := make([]byte, binary.MaxVarintLen64) - binary.LittleEndian.PutUint64(xattr, size) - - // Save the total size as a xattr in the first chunk - return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) -} - -// Get the total size of the chunked object `oid` stored as xattr -func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) { - // Fetch xattr as []byte - xattr := make([]byte, binary.MaxVarintLen64) - xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) - - if err != nil { - return 0, err - } - - if xattrLength != len(xattr) { - context.GetLogger(ctx).Errorf("object %s 
xattr length mismatch: %d != %d", oid, xattrLength, len(xattr)) - return 0, storagedriver.PathNotFoundError{Path: oid} - } - - // Convert []byte as uint64 - totalSize := binary.LittleEndian.Uint64(xattr) - - return totalSize, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go deleted file mode 100644 index ce367fb56684..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/rados/rados_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build include_rados - -package rados - -import ( - "os" - "testing" - - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -func init() { - poolname := os.Getenv("RADOS_POOL") - username := os.Getenv("RADOS_USER") - - driverConstructor := func() (storagedriver.StorageDriver, error) { - parameters := DriverParameters{ - poolname, - username, - defaultChunkSize, - } - - return New(parameters) - } - - skipCheck := func() string { - if poolname == "" { - return "RADOS_POOL must be set to run Rado tests" - } - return "" - } - - testsuites.RegisterSuite(driverConstructor, skipCheck) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go new file mode 100644 index 000000000000..902abeb4ce43 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3-aws/s3.go @@ -0,0 +1,960 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud storage. +// +// This package leverages the official aws client library for interfacing with +// S3. +// +// Because S3 is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that S3 guarantees only read-after-write consistency for new +// objects, but no read-after-update or list-after-write consistency. 
+package s3 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/client/transport" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "s3aws" + +// minChunkSize defines the minimum multipart upload chunk size +// S3 API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from S3 in a list call +const listMax = 1000 + +// validRegions maps known s3 region identifiers to region descriptors +var validRegions = map[string]struct{}{} + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKey string + SecretKey string + Bucket string + Region string + RegionEndpoint string + Encrypt bool + KeyID string + Secure bool + ChunkSize int64 + RootDirectory string + StorageClass string + UserAgent string +} + +func init() { + for _, region := range []string{ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "sa-east-1", + "cn-north-1", + "us-gov-west-1", + } { + validRegions[region] = struct{}{} + } + + // Register this as the default s3 driver in addition to s3aws + factory.Register("s3", &s3DriverFactory{}) + factory.Register(driverName, &s3DriverFactory{}) +} + +// s3DriverFactory implements the factory.StorageDriverFactory interface +type s3DriverFactory struct{} + +func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + S3 *s3.S3 + Bucket string + ChunkSize int64 + Encrypt bool + KeyID string + RootDirectory string + StorageClass string +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 +// Objects are stored at absolute keys in the provided bucket. 
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey := parameters["accesskey"] + if accessKey == nil { + accessKey = "" + } + secretKey := parameters["secretkey"] + if secretKey == nil { + secretKey = "" + } + + regionEndpoint := parameters["regionendpoint"] + if regionEndpoint == nil { + regionEndpoint = "" + } + + regionName, ok := parameters["region"] + if regionName == nil || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + region := fmt.Sprint(regionName) + // Don't check the region value if a custom endpoint is provided. + if regionEndpoint == "" { + if _, ok = validRegions[region]; !ok { + return nil, fmt.Errorf("Invalid region provided: %v", region) + } + } + + bucket := parameters["bucket"] + if bucket == nil || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + encryptBool := false + encrypt := parameters["encrypt"] + switch encrypt := encrypt.(type) { + case string: + b, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + encryptBool = b + case bool: + encryptBool = encrypt + case nil: + // do nothing + default: + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + + secureBool := true + secure := parameters["secure"] + switch secure := secure.(type) { + case string: + b, err := strconv.ParseBool(secure) + if err != nil { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + secureBool = b + case bool: + secureBool = secure + case nil: + // do nothing + default: + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + + keyID := parameters["keyid"] + if keyID == nil { + keyID = "" + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam := parameters["chunksize"] + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + case nil: + // do nothing + default: + return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + + rootDirectory := parameters["rootdirectory"] + if rootDirectory == nil { + rootDirectory = "" + } + + storageClass := s3.StorageClassStandard + storageClassParam := parameters["storageclass"] + if storageClassParam != nil { + storageClassString, ok := storageClassParam.(string) + if !ok { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) + } + // All valid storage class parameters are UPPERCASE, so be a bit more flexible here + storageClassString = strings.ToUpper(storageClassString) + 
if storageClassString != s3.StorageClassStandard && storageClassString != s3.StorageClassReducedRedundancy { + return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) + } + storageClass = storageClassString + } + + userAgent := parameters["useragent"] + if userAgent == nil { + userAgent = "" + } + + params := DriverParameters{ + fmt.Sprint(accessKey), + fmt.Sprint(secretKey), + fmt.Sprint(bucket), + region, + fmt.Sprint(regionEndpoint), + encryptBool, + fmt.Sprint(keyID), + secureBool, + chunkSize, + fmt.Sprint(rootDirectory), + storageClass, + fmt.Sprint(userAgent), + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + awsConfig := aws.NewConfig() + var creds *credentials.Credentials + if params.RegionEndpoint == "" { + creds = credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + }, + }, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + }) + + } else { + creds = credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + }, + }, + &credentials.EnvProvider{}, + }) + awsConfig.WithS3ForcePathStyle(true) + awsConfig.WithEndpoint(params.RegionEndpoint) + } + + awsConfig.WithCredentials(creds) + awsConfig.WithRegion(params.Region) + awsConfig.WithDisableSSL(!params.Secure) + + if params.UserAgent != "" { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), + }) + } + + s3obj := s3.New(session.New(awsConfig)) + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + KeyID: params.KeyID, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + reader, err := d.Reader(ctx, path, 0) + if err != nil { + return nil, err + } + return ioutil.ReadAll(reader) +} + +// PutContent stores the []byte content at a location designated by "path". 
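FromParameters and New above accept a loosely typed parameters map (region, bucket, optional credentials, chunksize, storageclass, and so on). The following is a hedged usage sketch, not part of the patch, showing how such a map might be fed through the driver factory; the bucket name, root directory, and blob path are placeholders, and accesskey/secretkey are omitted on the assumption that IAM instance credentials are available, as the comments in FromParameters note.

```go
// Hypothetical caller of the s3aws driver registered by this package's init().
package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver/factory"

	// Blank import registers the "s3" and "s3aws" factories.
	_ "github.com/docker/distribution/registry/storage/driver/s3-aws"
)

func main() {
	params := map[string]interface{}{
		"region":        "us-east-1",      // must be in validRegions unless regionendpoint is set
		"bucket":        "example-bucket", // placeholder
		"rootdirectory": "/registry",      // placeholder
		"chunksize":     "10485760",       // parsed via strconv.ParseInt; must be >= minChunkSize (5 MB)
		"storageclass":  "STANDARD",
	}

	d, err := factory.Create("s3aws", params)
	if err != nil {
		log.Fatalf("creating driver: %v", err)
	}

	ctx := context.Background()
	if err := d.PutContent(ctx, "/hello", []byte("hi")); err != nil {
		log.Fatalf("put: %v", err)
	}
	blob, err := d.GetContent(ctx, "/hello")
	if err != nil {
		log.Fatalf("get: %v", err)
	}
	fmt.Printf("%s\n", blob)
}
```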
+func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + _, err := d.S3.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + Body: bytes.NewReader(contents), + }) + return parseError(path, err) +} + +// Reader retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + resp, err := d.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), + }) + + if err != nil { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. +func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { + key := d.s3Path(path) + if !append { + // TODO (brianbland): cancel other uploads at this path + resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return nil, err + } + return d.newWriter(key, *resp.UploadId, nil), nil + } + resp, err := d.S3.ListMultipartUploads(&s3.ListMultipartUploadsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(key), + }) + if err != nil { + return nil, parseError(path, err) + } + + for _, multi := range resp.Uploads { + if key != *multi.Key { + continue + } + resp, err := d.S3.ListParts(&s3.ListPartsInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(key), + UploadId: multi.UploadId, + }) + if err != nil { + return nil, parseError(path, err) + } + var multiSize int64 + for _, part := range resp.Parts { + multiSize += *part.Size + } + return d.newWriter(key, *multi.UploadId, resp.Parts), nil + } + return nil, storagedriver.PathNotFoundError{Path: path} +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + MaxKeys: aws.Int64(1), + }) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(resp.Contents) == 1 { + if *resp.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = *resp.Contents[0].Size + fi.ModTime = *resp.Contents[0].LastModified + } + } else if len(resp.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. 
+func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". + // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + }) + if err != nil { + return nil, parseError(opath, err) + } + + files := []string{} + directories := []string{} + + for { + for _, key := range resp.Contents { + files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1)) + } + + for _, commonPrefix := range resp.CommonPrefixes { + commonPrefix := *commonPrefix.Prefix + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) + } + + if *resp.IsTruncated { + resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + Marker: resp.NextMarker, + }) + if err != nil { + return nil, err + } + } else { + break + } + } + + if opath != "/" { + if len(files) == 0 && len(directories) == 0 { + // Treat empty response as missing directory, since we don't actually + // have directories in s3. + return nil, storagedriver.PathNotFoundError{Path: opath} + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.S3.CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(destPath)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + SSEKMSKeyId: d.getSSEKMSKeyID(), + StorageClass: d.getStorageClass(), + CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)), + }) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. +func (d *driver) Delete(ctx context.Context, path string) error { + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + }) + if err != nil || len(resp.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + s3Objects := make([]*s3.ObjectIdentifier, 0, listMax) + + for len(resp.Contents) > 0 { + for _, key := range resp.Contents { + s3Objects = append(s3Objects, &s3.ObjectIdentifier{ + Key: key.Key, + }) + } + + _, err := d.S3.DeleteObjects(&s3.DeleteObjectsInput{ + Bucket: aws.String(d.Bucket), + Delete: &s3.Delete{ + Objects: s3Objects, + Quiet: aws.Bool(false), + }, + }) + if err != nil { + return nil + } + + resp, err = d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + }) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. 
+// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresIn := 20 * time.Minute + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresIn = et.Sub(time.Now()) + } + } + + var req *request.Request + + switch methodString { + case "GET": + req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + case "HEAD": + req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + default: + panic("unreachable") + } + + return req.Presign(expiresIn) +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func (d *driver) getEncryptionMode() *string { + if !d.Encrypt { + return nil + } + if d.KeyID == "" { + return aws.String("AES256") + } + return aws.String("aws:kms") +} + +func (d *driver) getSSEKMSKeyID() *string { + if d.KeyID != "" { + return aws.String(d.KeyID) + } + return nil +} + +func (d *driver) getContentType() *string { + return aws.String("application/octet-stream") +} + +func (d *driver) getACL() *string { + return aws.String("private") +} + +func (d *driver) getStorageClass() *string { + return aws.String(d.StorageClass) +} + +// writer attempts to upload parts to S3 in a buffered fashion where the last +// part is at least as large as the chunksize, so the multipart upload could be +// cleanly resumed in the future. This is violated if Close is called after less +// than a full chunk is written. 
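The URLFor implementation just above reads two options, "method" (GET or HEAD) and "expiry" (a time.Time), and presigns the request via the AWS SDK. A minimal sketch of how a caller might use it is below; it is not part of the patch, the blob path is a placeholder, and the helper name signedURLFor is invented for illustration.

```go
// Hypothetical caller requesting a temporary GET URL from any StorageDriver.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// signedURLFor asks the driver for a pre-signed GET URL. Drivers without
// URL support return a storagedriver.ErrUnsupportedMethod error instead.
func signedURLFor(ctx context.Context, d storagedriver.StorageDriver, path string) (string, error) {
	return d.URLFor(ctx, path, map[string]interface{}{
		"method": "GET",
		"expiry": time.Now().Add(20 * time.Minute), // matches URLFor's default window
	})
}

func main() {
	var d storagedriver.StorageDriver // e.g. built via factory.Create as sketched earlier
	if d == nil {
		log.Println("no driver configured; this sketch only illustrates the options map")
		return
	}
	url, err := signedURLFor(context.Background(), d, "/docker/registry/v2/blobs/example")
	if err != nil {
		log.Fatalf("URLFor: %v", err)
	}
	fmt.Println(url)
}
```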
+type writer struct { + driver *driver + key string + uploadID string + parts []*s3.Part + size int64 + readyPart []byte + pendingPart []byte + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(key, uploadID string, parts []*s3.Part) storagedriver.FileWriter { + var size int64 + for _, part := range parts { + size += *part.Size + } + return &writer{ + driver: d, + key: key, + uploadID: uploadID, + parts: parts, + size: size, + } +} + +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + // If the last written part is smaller than minChunkSize, we need to make a + // new multipart upload :sadface: + if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize { + var completedUploadedParts completedParts + for _, part := range w.parts { + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + sort.Sort(completedUploadedParts) + + _, err := w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return 0, err + } + + resp, err := w.driver.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + ContentType: w.driver.getContentType(), + ACL: w.driver.getACL(), + ServerSideEncryption: w.driver.getEncryptionMode(), + StorageClass: w.driver.getStorageClass(), + }) + if err != nil { + return 0, err + } + w.uploadID = *resp.UploadId + + // If the entire written file is smaller than minChunkSize, we need to make + // a new part from scratch :double sad face: + if w.size < minChunkSize { + resp, err := w.driver.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + }) + defer resp.Body.Close() + if err != nil { + return 0, err + } + w.parts = nil + w.readyPart, err = ioutil.ReadAll(resp.Body) + if err != nil { + return 0, err + } + } else { + // Otherwise we can use the old file as the new first part + copyPartResp, err := w.driver.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(w.driver.Bucket), + CopySource: aws.String(w.driver.Bucket + "/" + w.key), + Key: aws.String(w.key), + PartNumber: aws.Int64(1), + UploadId: resp.UploadId, + }) + if err != nil { + return 0, err + } + w.parts = []*s3.Part{ + { + ETag: copyPartResp.CopyPartResult.ETag, + PartNumber: aws.Int64(1), + Size: aws.Int64(w.size), + }, + } + } + } + + var n int + + for len(p) > 0 { + // If no parts are ready to write, fill up the first part + if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.readyPart = append(w.readyPart, p[:neededBytes]...) 
+ n += neededBytes + p = p[neededBytes:] + } else { + w.readyPart = append(w.readyPart, p...) + n += len(p) + p = nil + } + } + + if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { + if len(p) >= neededBytes { + w.pendingPart = append(w.pendingPart, p[:neededBytes]...) + n += neededBytes + p = p[neededBytes:] + err := w.flushPart() + if err != nil { + w.size += int64(n) + return n, err + } + } else { + w.pendingPart = append(w.pendingPart, p...) + n += len(p) + p = nil + } + } + } + w.size += int64(n) + return n, nil +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + w.closed = true + return w.flushPart() +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + _, err := w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + err := w.flushPart() + if err != nil { + return err + } + w.committed = true + + var completedUploadedParts completedParts + for _, part := range w.parts { + completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + sort.Sort(completedUploadedParts) + + _, err = w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedUploadedParts, + }, + }) + if err != nil { + w.driver.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + UploadId: aws.String(w.uploadID), + }) + return err + } + return nil +} + +// flushPart flushes buffers to write a part to S3. +// Only called by Write (with both buffers full) and Close/Commit (always) +func (w *writer) flushPart() error { + if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { + // nothing to write + return nil + } + if len(w.pendingPart) < int(w.driver.ChunkSize) { + // closing with a small pending part + // combine ready and pending to avoid writing a small part + w.readyPart = append(w.readyPart, w.pendingPart...) 
+ w.pendingPart = nil + } + + partNumber := aws.Int64(int64(len(w.parts) + 1)) + resp, err := w.driver.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(w.driver.Bucket), + Key: aws.String(w.key), + PartNumber: partNumber, + UploadId: aws.String(w.uploadID), + Body: bytes.NewReader(w.readyPart), + }) + if err != nil { + return err + } + w.parts = append(w.parts, &s3.Part{ + ETag: resp.ETag, + PartNumber: partNumber, + Size: aws.Int64(int64(len(w.readyPart))), + }) + w.readyPart = w.pendingPart + w.pendingPart = nil + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go new file mode 100644 index 000000000000..bb64ccf443b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3-aws/s3_test.go @@ -0,0 +1,205 @@ +package s3 + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) +var skipS3 func() string + +func init() { + accessKey := os.Getenv("AWS_ACCESS_KEY") + secretKey := os.Getenv("AWS_SECRET_KEY") + bucket := os.Getenv("S3_BUCKET") + encrypt := os.Getenv("S3_ENCRYPT") + keyID := os.Getenv("S3_KEY_ID") + secure := os.Getenv("S3_SECURE") + region := os.Getenv("AWS_REGION") + root, err := ioutil.TempDir("", "driver-") + regionEndpoint := os.Getenv("REGION_ENDPOINT") + if err != nil { + panic(err) + } + defer os.Remove(root) + + s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := true + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + region, + regionEndpoint, + encryptBool, + keyID, + secureBool, + minChunkSize, + rootDirectory, + storageClass, + driverName + "-test", + } + + return New(parameters) + } + + // Skip S3 storage driver tests if environment variable parameters are not provided + skipS3 = func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" + } + return "" + } + + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { + return s3DriverConstructor(root, s3.StorageClassStandard) + }, skipS3) +} + +func TestEmptyRootList(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + validRoot, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(validRoot) + + rootedDriver, err := s3DriverConstructor(validRoot, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := s3DriverConstructor("", s3.StorageClassStandard) + if err != nil { + 
t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := s3DriverConstructor("/", s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } + } +} + +func TestStorageClass(t *testing.T) { + if skipS3() != "" { + t.Skip(skipS3()) + } + + rootDir, err := ioutil.TempDir("", "driver-") + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } + defer os.Remove(rootDir) + + standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) + if err != nil { + t.Fatalf("unexpected error creating driver with standard storage: %v", err) + } + + rrDriver, err := s3DriverConstructor(rootDir, s3.StorageClassReducedRedundancy) + if err != nil { + t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) + } + + standardFilename := "/test-standard" + rrFilename := "/test-rr" + contents := []byte("contents") + ctx := context.Background() + + err = standardDriver.PutContent(ctx, standardFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer standardDriver.Delete(ctx, standardFilename) + + err = rrDriver.PutContent(ctx, rrFilename, contents) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } + defer rrDriver.Delete(ctx, rrFilename) + + standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) + resp, err := standardDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(standardDriverUnwrapped.Bucket), + Key: aws.String(standardDriverUnwrapped.s3Path(standardFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving standard storage file: %v", err) + } + defer resp.Body.Close() + // Amazon only populates this header value for non-standard storage classes + if resp.StorageClass != nil { + t.Fatalf("unexpected storage class for standard file: %v", resp.StorageClass) + } + + rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) + resp, err = rrDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(rrDriverUnwrapped.Bucket), + Key: aws.String(rrDriverUnwrapped.s3Path(rrFilename)), + }) + if err != nil { + t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) + } + defer resp.Body.Close() + if resp.StorageClass == nil { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", s3.StorageClassStandard) + } else if *resp.StorageClass != s3.StorageClassReducedRedundancy { + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", *resp.StorageClass) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go deleted file mode 100644 index 
7bb23a85de18..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3.go +++ /dev/null @@ -1,829 +0,0 @@ -// Package s3 provides a storagedriver.StorageDriver implementation to -// store blobs in Amazon S3 cloud storage. -// -// This package leverages the AdRoll/goamz client library for interfacing with -// s3. -// -// Because s3 is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// Keep in mind that s3 guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. -package s3 - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" - "github.com/Sirupsen/logrus" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" -) - -const driverName = "s3" - -// minChunkSize defines the minimum multipart upload chunk size -// S3 API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize - -// listMax is the largest amount of objects you can request from S3 in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKey string - SecretKey string - Bucket string - Region aws.Region - Encrypt bool - Secure bool - V4Auth bool - ChunkSize int64 - RootDirectory string -} - -func init() { - factory.Register(driverName, &s3DriverFactory{}) -} - -// s3DriverFactory implements the factory.StorageDriverFactory interface -type s3DriverFactory struct{} - -func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - S3 *s3.S3 - Bucket *s3.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string - - pool sync.Pool // pool []byte buffers used for WriteStream - zeros []byte // shared, zero-valued buffer used for WriteStream -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 -// Objects are stored at absolute keys in the provided bucket. 
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - accesskey -// - secretkey -// - region -// - bucket -// - encrypt -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - // Providing no values for these is valid in case the user is authenticating - // with an IAM on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) - accessKey, ok := parameters["accesskey"] - if !ok { - accessKey = "" - } - secretKey, ok := parameters["secretkey"] - if !ok { - secretKey = "" - } - - regionName, ok := parameters["region"] - if !ok || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") - } - region := aws.GetRegion(fmt.Sprint(regionName)) - if region.Name == "" { - return nil, fmt.Errorf("Invalid region provided: %v", region) - } - - bucket, ok := parameters["bucket"] - if !ok || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - encryptBool := false - encrypt, ok := parameters["encrypt"] - if ok { - encryptBool, ok = encrypt.(bool) - if !ok { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - } - - secureBool := true - secure, ok := parameters["secure"] - if ok { - secureBool, ok = secure.(bool) - if !ok { - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - } - - v4AuthBool := false - v4Auth, ok := parameters["v4auth"] - if ok { - v4AuthBool, ok = v4Auth.(bool) - if !ok { - return nil, fmt.Errorf("The v4auth parameter should be a boolean") - } - } - - chunkSize := int64(defaultChunkSize) - chunkSizeParam, ok := parameters["chunksize"] - if ok { - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - default: - return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) - } - } - - rootDirectory, ok := parameters["rootdirectory"] - if !ok { - rootDirectory = "" - } - - params := DriverParameters{ - fmt.Sprint(accessKey), - fmt.Sprint(secretKey), - fmt.Sprint(bucket), - region, - encryptBool, - secureBool, - v4AuthBool, - chunkSize, - fmt.Sprint(rootDirectory), - } - - return New(params) -} - -// New constructs a new Driver with the given AWS credentials, region, encryption flag, and -// bucketName -func New(params DriverParameters) (*Driver, error) { - auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) - if err != nil { - return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) - } - - if !params.Secure { - params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) - } - - s3obj := s3.New(auth, params.Region) - bucket := s3obj.Bucket(params.Bucket) - - if params.V4Auth { - s3obj.Signature = aws.V4Signature - } else { - if params.Region.Name == "eu-central-1" { - return nil, fmt.Errorf("The eu-central-1 region only works with v4 
authentication") - } - } - - // TODO Currently multipart uploads have no timestamps, so this would be unwise - // if you initiated a new s3driver while another one is running on the same bucket. - // multis, _, err := bucket.ListMulti("", "") - // if err != nil { - // return nil, err - // } - - // for _, multi := range multis { - // err := multi.Abort() - // //TODO appropriate to do this error checking? - // if err != nil { - // return nil, err - // } - // } - - d := &driver{ - S3: s3obj, - Bucket: bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - RootDirectory: params.RootDirectory, - zeros: make([]byte, params.ChunkSize), - } - - d.pool.New = func() interface{} { - return make([]byte, d.ChunkSize) - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Bucket.Get(d.s3Path(path)) - if err != nil { - return nil, parseError(path, err) - } - return content, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) -} - -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - headers := make(http.Header) - headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") - - resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) - if err != nil { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return nil, parseError(path, err) - } - return resp.Body, nil -} - -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { - partNumber := 1 - bytesRead := 0 - var putErrChan chan error - parts := []s3.Part{} - var part s3.Part - done := make(chan struct{}) // stopgap to free up waiting goroutines - - multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) - if err != nil { - return 0, err - } - - buf := d.getbuf() - - // We never want to leave a dangling multipart upload, our only consistent state is - // when there is a whole object at path. This is in order to remain consistent with - // the stat call. - // - // Note that if the machine dies before executing the defer, we will be left with a dangling - // multipart upload, which will eventually be cleaned up, but we will lose all of the progress - // made prior to the machine crashing. 
- defer func() { - if putErrChan != nil { - if putErr := <-putErrChan; putErr != nil { - err = putErr - } - } - - if len(parts) > 0 { - if multi == nil { - // Parts should be empty if the multi is not initialized - panic("Unreachable") - } else { - if multi.Complete(parts) != nil { - multi.Abort() - } - } - } - - d.putbuf(buf) // needs to be here to pick up new buf value - close(done) // free up any waiting goroutines - }() - - // Fills from 0 to total from current - fromSmallCurrent := func(total int64) error { - current, err := d.ReadStream(ctx, path, 0) - if err != nil { - return err - } - - bytesRead = 0 - for int64(bytesRead) < total { - //The loop should very rarely enter a second iteration - nn, err := current.Read(buf[bytesRead:total]) - bytesRead += nn - if err != nil { - if err != io.EOF { - return err - } - - break - } - - } - return nil - } - - // Fills from parameter to chunkSize from reader - fromReader := func(from int64) error { - bytesRead = 0 - for from+int64(bytesRead) < d.ChunkSize { - nn, err := reader.Read(buf[from+int64(bytesRead):]) - totalRead += int64(nn) - bytesRead += nn - - if err != nil { - if err != io.EOF { - return err - } - - break - } - } - - if putErrChan == nil { - putErrChan = make(chan error) - } else { - if putErr := <-putErrChan; putErr != nil { - putErrChan = nil - return putErr - } - } - - go func(bytesRead int, from int64, buf []byte) { - defer d.putbuf(buf) // this buffer gets dropped after this call - - // DRAGONS(stevvooe): There are few things one might want to know - // about this section. First, the putErrChan is expecting an error - // and a nil or just a nil to come through the channel. This is - // covered by the silly defer below. The other aspect is the s3 - // retry backoff to deal with RequestTimeout errors. Even though - // the underlying s3 library should handle it, it doesn't seem to - // be part of the shouldRetry function (see AdRoll/goamz/s3). - defer func() { - select { - case putErrChan <- nil: // for some reason, we do this no matter what. - case <-done: - return // ensure we don't leak the goroutine - } - }() - - if bytesRead <= 0 { - return - } - - var err error - var part s3.Part - - loop: - for retries := 0; retries < 5; retries++ { - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) - if err == nil { - break // success! - } - - // NOTE(stevvooe): This retry code tries to only retry under - // conditions where the s3 package does not. We may add s3 - // error codes to the below if we see others bubble up in the - // application. Right now, the most troubling is - // RequestTimeout, which seems to only triggered when a tcp - // connection to s3 slows to a crawl. If the RequestTimeout - // ends up getting added to the s3 library and we don't see - // other errors, this retry loop can be removed. - switch err := err.(type) { - case *s3.Error: - switch err.Code { - case "RequestTimeout": - // allow retries on only this error. - default: - break loop - } - } - - backoff := 100 * time.Millisecond * time.Duration(retries+1) - logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) - time.Sleep(backoff) - } - - if err != nil { - logrus.Errorf("error putting part, aborting: %v", err) - select { - case putErrChan <- err: - case <-done: - return // don't leak the goroutine - } - } - - // parts and partNumber are safe, because this function is the - // only one modifying them and we force it to be executed - // serially. 
- parts = append(parts, part) - partNumber++ - }(bytesRead, from, buf) - - buf = d.getbuf() // use a new buffer for the next call - return nil - } - - if offset > 0 { - resp, err := d.Bucket.Head(d.s3Path(path), nil) - if err != nil { - if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { - return 0, err - } - } - - currentLength := int64(0) - if err == nil { - currentLength = resp.ContentLength - } - - if currentLength >= offset { - if offset < d.ChunkSize { - // chunkSize > currentLength >= offset - if err = fromSmallCurrent(offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // currentLength >= offset >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - } - } else { - // Fills between parameters with 0s but only when to - from <= chunkSize - fromZeroFillSmall := func(from, to int64) error { - bytesRead = 0 - for from+int64(bytesRead) < to { - nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) - bytesRead += nn - if err != nil { - return err - } - } - - return nil - } - - // Fills between parameters with 0s, making new parts - fromZeroFillLarge := func(from, to int64) error { - bytesRead64 := int64(0) - for to-(from+bytesRead64) >= d.ChunkSize { - part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) - if err != nil { - return err - } - bytesRead64 += d.ChunkSize - - parts = append(parts, part) - partNumber++ - } - - return fromZeroFillSmall(0, (to-from)%d.ChunkSize) - } - - // currentLength < offset - if currentLength < d.ChunkSize { - if offset < d.ChunkSize { - // chunkSize > offset > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset); err != nil { - return totalRead, err - } - - if totalRead+offset < d.ChunkSize { - return totalRead, nil - } - } else { - // offset >= chunkSize > currentLength - if err = fromSmallCurrent(currentLength); err != nil { - return totalRead, err - } - - if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { - return totalRead, err - } - - part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) - if err != nil { - return totalRead, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from chunkSize up to offset, then some reader - if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { - return totalRead, err - } - - if err = fromReader(offset % d.ChunkSize); err != nil { - return totalRead, err - } - - if totalRead+(offset%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - } else { - // offset > currentLength >= chunkSize - _, part, err = multi.PutPartCopy(partNumber, - s3.CopyOptions{}, - d.Bucket.Name+"/"+d.s3Path(path)) - if err != nil { - return 0, err - } - - parts = append(parts, part) - partNumber++ - - //Zero fill from currentLength up to offset, then some reader - if err = fromZeroFillLarge(currentLength, offset); err != nil { - return totalRead, err - } - - if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { - return totalRead, err - } - - if 
totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { - return totalRead, nil - } - } - - } - } - - for { - if err = fromReader(0); err != nil { - return totalRead, err - } - - if int64(bytesRead) < d.ChunkSize { - break - } - } - - return totalRead, nil -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(listResponse.Contents) == 1 { - if listResponse.Contents[0].Key != d.s3Path(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = listResponse.Contents[0].Size - - timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) - if err != nil { - return nil, err - } - fi.ModTime = timestamp - } - } else if len(listResponse.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, opath string) ([]string, error) { - path := opath - if path != "/" && path[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". - // In those cases, there is no root prefix to replace and we must actually add a "/" to all - // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp - prefix := "" - if d.s3Path("") == "" { - prefix = "/" - } - - listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) - if err != nil { - return nil, parseError(opath, err) - } - - files := []string{} - directories := []string{} - - for { - for _, key := range listResponse.Contents { - files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) - } - - for _, commonPrefix := range listResponse.CommonPrefixes { - directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) - } - - if listResponse.IsTruncated { - listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) - if err != nil { - return nil, err - } - } else { - break - } - } - - if opath != "/" { - if len(files) == 0 && len(directories) == 0 { - // Treat empty response as missing directory, since we don't actually - // have directories in s3. - return nil, storagedriver.PathNotFoundError{Path: opath} - } - } - - return append(files, directories...), nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - /* This is terrible, but aws doesn't have an actual move. */ - _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), - s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) - if err != nil { - return parseError(sourcePath, err) - } - - return d.Delete(ctx, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. 
-func (d *driver) Delete(ctx context.Context, path string) error { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax) - if err != nil || len(listResponse.Contents) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - - s3Objects := make([]s3.Object, listMax) - - for len(listResponse.Contents) > 0 { - for index, key := range listResponse.Contents { - s3Objects[index].Key = key.Key - } - - err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]}) - if err != nil { - return nil - } - - listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. -func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - expiresTime := time.Now().Add(20 * time.Minute) - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - - return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil -} - -func (d *driver) s3Path(path string) string { - return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") -} - -// S3BucketKey returns the s3 bucket key for the given storage driver path. -func (d *Driver) S3BucketKey(path string) string { - return d.StorageDriver.(*driver).s3Path(path) -} - -func parseError(path string, err error) error { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" { - return storagedriver.PathNotFoundError{Path: path} - } - - return err -} - -func hasCode(err error, code string) bool { - s3err, ok := err.(*aws.Error) - return ok && s3err.Code == code -} - -func (d *driver) getOptions() s3.Options { - return s3.Options{SSE: d.Encrypt} -} - -func getPermissions() s3.ACL { - return s3.Private -} - -func (d *driver) getContentType() string { - return "application/octet-stream" -} - -// getbuf returns a buffer from the driver's pool with length d.ChunkSize. -func (d *driver) getbuf() []byte { - return d.pool.Get().([]byte) -} - -func (d *driver) putbuf(p []byte) { - copy(p, d.zeros) - d.pool.Put(p) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go deleted file mode 100644 index 70172a6de087..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/s3/s3_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/AdRoll/goamz/aws" - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. 
-func Test(t *testing.T) { check.TestingT(t) } - -var s3DriverConstructor func(rootDirectory string) (*Driver, error) -var skipS3 func() string - -func init() { - accessKey := os.Getenv("AWS_ACCESS_KEY") - secretKey := os.Getenv("AWS_SECRET_KEY") - bucket := os.Getenv("S3_BUCKET") - encrypt := os.Getenv("S3_ENCRYPT") - secure := os.Getenv("S3_SECURE") - v4auth := os.Getenv("S3_USE_V4_AUTH") - region := os.Getenv("AWS_REGION") - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - s3DriverConstructor = func(rootDirectory string) (*Driver, error) { - encryptBool := false - if encrypt != "" { - encryptBool, err = strconv.ParseBool(encrypt) - if err != nil { - return nil, err - } - } - - secureBool := true - if secure != "" { - secureBool, err = strconv.ParseBool(secure) - if err != nil { - return nil, err - } - } - - v4AuthBool := false - if v4auth != "" { - v4AuthBool, err = strconv.ParseBool(v4auth) - if err != nil { - return nil, err - } - } - - parameters := DriverParameters{ - accessKey, - secretKey, - bucket, - aws.GetRegion(region), - encryptBool, - secureBool, - v4AuthBool, - minChunkSize, - rootDirectory, - } - - return New(parameters) - } - - // Skip S3 storage driver tests if environment variable parameters are not provided - skipS3 = func() string { - if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { - return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" - } - return "" - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return s3DriverConstructor(root) - }, skipS3) -} - -func TestEmptyRootList(t *testing.T) { - if skipS3() != "" { - t.Skip(skipS3()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := s3DriverConstructor(validRoot) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := s3DriverConstructor("") - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := s3DriverConstructor("/") - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rootedDriver.Delete(ctx, filename) - - keys, err := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, err = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go index dc8bdc8d43a8..548a17d84616 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/storagedriver.go @@ -34,7 +34,11 @@ func (version Version) 
Minor() uint { const CurrentVersion Version = "0.1" // StorageDriver defines methods that a Storage Driver must implement for a -// filesystem-like key/value object storage. +// filesystem-like key/value object storage. Storage Drivers are automatically +// registered via an internal registration mechanism, and generally created +// via the StorageDriverFactory interface (https://godoc.org/github.com/docker/distribution/registry/storage/driver/factory). +// Please see the aforementioned factory package for example code showing how to get an instance +// of a StorageDriver type StorageDriver interface { // Name returns the human-readable "name" of the driver, useful in error // messages and logging. By convention, this will just be the registration @@ -49,16 +53,14 @@ type StorageDriver interface { // This should primarily be used for small objects. PutContent(ctx context.Context, path string, content []byte) error - // ReadStream retrieves an io.ReadCloser for the content stored at "path" + // Reader retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. - ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) + Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) - // WriteStream stores the contents of the provided io.ReadCloser at a - // location designated by the given path. - // May be used to resume writing a stream by providing a nonzero offset. - // The offset must be no larger than the CurrentSize for this path. - WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) + // Writer returns a FileWriter which will store the content written to it + // at the location designated by "path" after the call to Commit. + Writer(ctx context.Context, path string, append bool) (FileWriter, error) // Stat retrieves the FileInfo for the given path, including the current // size in bytes and the creation time. @@ -84,6 +86,25 @@ type StorageDriver interface { URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) } +// FileWriter provides an abstraction for an opened writable file-like object in +// the storage backend. The FileWriter must flush all content written to it on +// the call to Close, but is only required to make its content readable on a +// call to Commit. +type FileWriter interface { + io.WriteCloser + + // Size returns the number of bytes written to this FileWriter. + Size() int64 + + // Cancel removes any written content from this FileWriter. + Cancel() error + + // Commit flushes all content written to this FileWriter and makes it + // available for future calls to StorageDriver.GetContent and + // StorageDriver.Reader. + Commit() error +} + // PathRegexp is the regular expression which each file path must match. A // file path is absolute, beginning with a slash and containing a positive // number of path components separated by slashes, where each component is @@ -133,7 +154,7 @@ func (err InvalidOffsetError) Error() string { } // Error is a catch-all error type which captures an error string and -// the driver type on which it occured. +// the driver type on which it occurred. 
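A minimal usage sketch of the Writer/FileWriter API introduced in this hunk (the helper below is hypothetical and not part of the patch; it assumes only the StorageDriver and FileWriter interfaces shown above and the vendored distribution context package). Content written to a FileWriter is only guaranteed to be readable through GetContent or Reader after Commit; Close flushes, and Cancel discards a partial write.

```go
package example

import (
	"io"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// copyToDriver streams src into the backend at path using the new API:
// open a FileWriter, copy, Commit to publish, and Close (deferred) to flush.
func copyToDriver(ctx context.Context, d storagedriver.StorageDriver, path string, src io.Reader) (int64, error) {
	fw, err := d.Writer(ctx, path, false) // append=false: start a fresh object
	if err != nil {
		return 0, err
	}
	defer fw.Close()

	if _, err := io.Copy(fw, src); err != nil {
		fw.Cancel() // best effort: drop whatever was written so far
		return fw.Size(), err
	}
	if err := fw.Commit(); err != nil {
		return fw.Size(), err
	}
	return fw.Size(), nil // Size reports the bytes written to this FileWriter
}
```

Separating Commit from Close lets a driver defer the step that actually publishes the object, as the Swift driver further down does by creating its object manifest in Commit.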
type Error struct { DriverName string Enclosed error diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go index 86bce794d27a..0cc037afda28 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/swift/swift.go @@ -16,8 +16,8 @@ package swift import ( + "bufio" "bytes" - "crypto/md5" "crypto/rand" "crypto/sha1" "crypto/tls" @@ -49,6 +49,9 @@ const defaultChunkSize = 20 * 1024 * 1024 // minChunkSize defines the minimum size of a segment const minChunkSize = 1 << 20 +// contentType defines the Content-Type header associated with stored segments +const contentType = "application/octet-stream" + // readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded var readAfterWriteTimeout = 15 * time.Second @@ -277,21 +280,21 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { if err == swift.ObjectNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } - return content, nil + return content, err } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) + err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, contentType) if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } -// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. -func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(swift.Headers) headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" @@ -305,224 +308,46 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. return file, err } -// WriteStream stores the contents of the provided io.Reader at a -// location designated by the given path. The driver will know it has -// received the full contents when the reader returns io.EOF. The number -// of successfully READ bytes will be returned, even if an error is -// returned. May be used to resume writing a stream by providing a nonzero -// offset. Offsets past the current size will write from the position -// beyond the end of the file. -func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { +// Writer returns a FileWriter which will store the content written to it +// at the location designated by "path" after the call to Commit. 
+func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { var ( - segments []swift.Object - multi io.Reader - paddingReader io.Reader - currentLength int64 - cursor int64 - segmentPath string + segments []swift.Object + segmentsPath string + err error ) - partNumber := 1 - chunkSize := int64(d.ChunkSize) - zeroBuf := make([]byte, d.ChunkSize) - hash := md5.New() - - getSegment := func() string { - return fmt.Sprintf("%s/%016d", segmentPath, partNumber) - } - - max := func(a int64, b int64) int64 { - if a > b { - return a - } - return b - } - - createManifest := true - info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) - if err == nil { - manifest, ok := headers["X-Object-Manifest"] - if !ok { - if segmentPath, err = d.swiftSegmentPath(path); err != nil { - return 0, err - } - if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil { - return 0, err - } - segments = append(segments, info) - } else { - _, segmentPath = parseManifest(manifest) - if segments, err = d.getAllSegments(segmentPath); err != nil { - return 0, err - } - createManifest = false - } - currentLength = info.Bytes - } else if err == swift.ObjectNotFound { - if segmentPath, err = d.swiftSegmentPath(path); err != nil { - return 0, err + if !append { + segmentsPath, err = d.swiftSegmentPath(path) + if err != nil { + return nil, err } } else { - return 0, err - } - - // First, we skip the existing segments that are not modified by this call - for i := range segments { - if offset < cursor+segments[i].Bytes { - break + info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } else if err != nil { + return nil, err } - cursor += segments[i].Bytes - hash.Write([]byte(segments[i].Hash)) - partNumber++ - } - - // We reached the end of the file but we haven't reached 'offset' yet - // Therefore we add blocks of zeros - if offset >= currentLength { - for offset-currentLength >= chunkSize { - // Insert a block a zero - headers, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) + manifest, ok := headers["X-Object-Manifest"] + if !ok { + segmentsPath, err = d.swiftSegmentPath(path) if err != nil { - if err == swift.ObjectNotFound { - return 0, storagedriver.PathNotFoundError{Path: getSegment()} - } - return 0, err - } - currentLength += chunkSize - partNumber++ - hash.Write([]byte(headers["Etag"])) - } - - cursor = currentLength - paddingReader = bytes.NewReader(zeroBuf) - } else if offset-cursor > 0 { - // Offset is inside the current segment : we need to read the - // data from the beginning of the segment to offset - file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) - if err != nil { - if err == swift.ObjectNotFound { - return 0, storagedriver.PathNotFoundError{Path: getSegment()} - } - return 0, err - } - defer file.Close() - paddingReader = file - } - - readers := []io.Reader{} - if paddingReader != nil { - readers = append(readers, io.LimitReader(paddingReader, offset-cursor)) - } - readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor))) - multi = io.MultiReader(readers...) 
- - writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { - currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) - if err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} + return nil, err } - return false, bytesRead, err - } - - segmentHash := md5.New() - writer := io.MultiWriter(currentSegment, segmentHash) - - n, err := io.Copy(writer, multi) - if err != nil { - return false, bytesRead, err - } - - if n > 0 { - defer func() { - closeError := currentSegment.Close() - if err != nil { - err = closeError - } - hexHash := hex.EncodeToString(segmentHash.Sum(nil)) - hash.Write([]byte(hexHash)) - }() - bytesRead += n - max(0, offset-cursor) - } - - if n < chunkSize { - // We wrote all the data - if cursor+n < currentLength { - // Copy the end of the chunk - headers := make(swift.Headers) - headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) - file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) - if err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: path} - } - return false, bytesRead, err - } - - _, copyErr := io.Copy(writer, file) - - if err := file.Close(); err != nil { - if err == swift.ObjectNotFound { - return false, bytesRead, storagedriver.PathNotFoundError{Path: path} - } - return false, bytesRead, err - } - - if copyErr != nil { - return false, bytesRead, copyErr - } + if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegmentPath(segmentsPath, len(segments))); err != nil { + return nil, err } - - return true, bytesRead, nil - } - - multi = io.LimitReader(reader, chunkSize) - cursor += chunkSize - partNumber++ - - return false, bytesRead, nil - } - - finished := false - read := int64(0) - bytesRead := int64(0) - for finished == false { - finished, read, err = writeSegment(getSegment()) - bytesRead += read - if err != nil { - return bytesRead, err - } - } - - for ; partNumber < len(segments); partNumber++ { - hash.Write([]byte(segments[partNumber].Hash)) - } - - if createManifest { - if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { - return 0, err - } - } - - expectedHash := hex.EncodeToString(hash.Sum(nil)) - waitingTime := readAfterWriteWait - endTime := time.Now().Add(readAfterWriteTimeout) - for { - var infos swift.Object - if infos, _, err = d.Conn.Object(d.Container, d.swiftPath(path)); err == nil { - if strings.Trim(infos.Hash, "\"") == expectedHash { - return bytesRead, nil + segments = []swift.Object{info} + } else { + _, segmentsPath = parseManifest(manifest) + if segments, err = d.getAllSegments(segmentsPath); err != nil { + return nil, err } - err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) - } - if time.Now().Add(waitingTime).After(endTime) { - break } - time.Sleep(waitingTime) - waitingTime *= 2 } - return bytesRead, err + return d.newWriter(path, segmentsPath, segments), nil } // Stat retrieves the FileInfo for the given path, including the current size @@ -551,23 +376,26 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, fi.IsDir = true return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } else if obj.Name == swiftPath { - // On Swift 1.12, the 'bytes' field is always 0 - // so we need to do a second HEAD request - info, _, err := 
d.Conn.Object(d.Container, swiftPath) - if err != nil { - if err == swift.ObjectNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} - } - return nil, err - } - fi.IsDir = false - fi.Size = info.Bytes - fi.ModTime = info.LastModified - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil + // The file exists. But on Swift 1.12, the 'bytes' field is always 0 so + // we need to do a separate HEAD request. + break } } - return nil, storagedriver.PathNotFoundError{Path: path} + //Don't trust an empty `objects` slice. A container listing can be + //outdated. For files, we can make a HEAD request on the object which + //reports existence (at least) much more reliably. + info, _, err := d.Conn.Object(d.Container, swiftPath) + if err != nil { + if err == swift.ObjectNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + fi.IsDir = false + fi.Size = info.Bytes + fi.ModTime = info.LastModified + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } // List returns a list of the objects that are direct descendants of the given path. @@ -763,22 +591,59 @@ func (d *driver) swiftSegmentPath(path string) (string, error) { return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil } -func (d *driver) getContentType() string { - return "application/octet-stream" -} - func (d *driver) getAllSegments(path string) ([]swift.Object, error) { + //a simple container listing works 99.9% of the time segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) - if err == swift.ContainerNotFound { - return nil, storagedriver.PathNotFoundError{Path: path} + if err != nil { + if err == swift.ContainerNotFound { + return nil, storagedriver.PathNotFoundError{Path: path} + } + return nil, err + } + + //build a lookup table by object name + hasObjectName := make(map[string]struct{}) + for _, segment := range segments { + hasObjectName[segment.Name] = struct{}{} + } + + //The container listing might be outdated (i.e. not contain all existing + //segment objects yet) because of temporary inconsistency (Swift is only + //eventually consistent!). Check its completeness. + segmentNumber := 0 + for { + segmentNumber++ + segmentPath := getSegmentPath(path, segmentNumber) + + if _, seen := hasObjectName[segmentPath]; seen { + continue + } + + //This segment is missing in the container listing. Use a more reliable + //request to check its existence. (HEAD requests on segments are + //guaranteed to return the correct metadata, except for the pathological + //case of an outage of large parts of the Swift cluster or its network, + //since every segment is only written once.) + segment, _, err := d.Conn.Object(d.Container, segmentPath) + switch err { + case nil: + //found new segment -> keep going, more might be missing + segments = append(segments, segment) + continue + case swift.ObjectNotFound: + //This segment is missing. Since we upload segments sequentially, + //there won't be any more segments after it. 
+ return segments, nil + default: + return nil, err //unexpected error + } } - return segments, err } func (d *driver) createManifest(path string, segments string) error { headers := make(swift.Headers) headers["X-Object-Manifest"] = segments - manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) + manifest, err := d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", contentType, headers) if err != nil { if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} @@ -810,3 +675,159 @@ func generateSecret() (string, error) { } return hex.EncodeToString(secretBytes[:]), nil } + +func getSegmentPath(segmentsPath string, partNumber int) string { + return fmt.Sprintf("%s/%016d", segmentsPath, partNumber) +} + +type writer struct { + driver *driver + path string + segmentsPath string + size int64 + bw *bufio.Writer + closed bool + committed bool + cancelled bool +} + +func (d *driver) newWriter(path, segmentsPath string, segments []swift.Object) storagedriver.FileWriter { + var size int64 + for _, segment := range segments { + size += segment.Bytes + } + return &writer{ + driver: d, + path: path, + segmentsPath: segmentsPath, + size: size, + bw: bufio.NewWriterSize(&segmentWriter{ + conn: d.Conn, + container: d.Container, + segmentsPath: segmentsPath, + segmentNumber: len(segments) + 1, + maxChunkSize: d.ChunkSize, + }, d.ChunkSize), + } +} + +func (w *writer) Write(p []byte) (int, error) { + if w.closed { + return 0, fmt.Errorf("already closed") + } else if w.committed { + return 0, fmt.Errorf("already committed") + } else if w.cancelled { + return 0, fmt.Errorf("already cancelled") + } + + n, err := w.bw.Write(p) + w.size += int64(n) + return n, err +} + +func (w *writer) Size() int64 { + return w.size +} + +func (w *writer) Close() error { + if w.closed { + return fmt.Errorf("already closed") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if !w.committed && !w.cancelled { + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + if err := w.waitForSegmentsToShowUp(); err != nil { + return err + } + } + w.closed = true + + return nil +} + +func (w *writer) Cancel() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } + w.cancelled = true + return w.driver.Delete(context.Background(), w.path) +} + +func (w *writer) Commit() error { + if w.closed { + return fmt.Errorf("already closed") + } else if w.committed { + return fmt.Errorf("already committed") + } else if w.cancelled { + return fmt.Errorf("already cancelled") + } + + if err := w.bw.Flush(); err != nil { + return err + } + + if err := w.driver.createManifest(w.path, w.driver.Container+"/"+w.segmentsPath); err != nil { + return err + } + + w.committed = true + return w.waitForSegmentsToShowUp() +} + +func (w *writer) waitForSegmentsToShowUp() error { + var err error + waitingTime := readAfterWriteWait + endTime := time.Now().Add(readAfterWriteTimeout) + + for { + var info swift.Object + if info, _, err = w.driver.Conn.Object(w.driver.Container, w.driver.swiftPath(w.path)); err == nil { + if info.Bytes == w.size { + break + } + err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", w.path) + } + if time.Now().Add(waitingTime).After(endTime) { + break + } + time.Sleep(waitingTime) + waitingTime *= 2 + } + + return err +} + +type segmentWriter struct { + conn swift.Connection 
+ container string + segmentsPath string + segmentNumber int + maxChunkSize int +} + +func (sw *segmentWriter) Write(p []byte) (int, error) { + n := 0 + for offset := 0; offset < len(p); offset += sw.maxChunkSize { + chunkSize := sw.maxChunkSize + if offset+chunkSize > len(p) { + chunkSize = len(p) - offset + } + _, err := sw.conn.ObjectPut(sw.container, getSegmentPath(sw.segmentsPath, sw.segmentNumber), bytes.NewReader(p[offset:offset+chunkSize]), false, "", contentType, nil) + if err != nil { + return n, err + } + + sw.segmentNumber++ + n += chunkSize + } + + return n, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go deleted file mode 100644 index 703003098829..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/driver/testsuites/testsuites.go +++ /dev/null @@ -1,1204 +0,0 @@ -package testsuites - -import ( - "bytes" - "crypto/sha1" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "gopkg.in/check.v1" -) - -// Test hooks up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -// RegisterSuite registers an in-process storage driver test suite with -// the go test runner. -func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { - check.Suite(&DriverSuite{ - Constructor: driverConstructor, - SkipCheck: skipCheck, - ctx: context.Background(), - }) -} - -// SkipCheck is a function used to determine if a test suite should be skipped. -// If a SkipCheck returns a non-empty skip reason, the suite is skipped with -// the given reason. -type SkipCheck func() (reason string) - -// NeverSkip is a default SkipCheck which never skips the suite. -var NeverSkip SkipCheck = func() string { return "" } - -// DriverConstructor is a function which returns a new -// storagedriver.StorageDriver. -type DriverConstructor func() (storagedriver.StorageDriver, error) - -// DriverTeardown is a function which cleans up a suite's -// storagedriver.StorageDriver. -type DriverTeardown func() error - -// DriverSuite is a gocheck test suite designed to test a -// storagedriver.StorageDriver. The intended way to create a DriverSuite is -// with RegisterSuite. -type DriverSuite struct { - Constructor DriverConstructor - Teardown DriverTeardown - SkipCheck - storagedriver.StorageDriver - ctx context.Context -} - -// SetUpSuite sets up the gocheck test suite. -func (suite *DriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - c.Skip(reason) - } - d, err := suite.Constructor() - c.Assert(err, check.IsNil) - suite.StorageDriver = d -} - -// TearDownSuite tears down the gocheck test suite. -func (suite *DriverSuite) TearDownSuite(c *check.C) { - if suite.Teardown != nil { - err := suite.Teardown() - c.Assert(err, check.IsNil) - } -} - -// TearDownTest tears down the gocheck test. -// This causes the suite to abort if any files are left around in the storage -// driver. -func (suite *DriverSuite) TearDownTest(c *check.C) { - files, _ := suite.StorageDriver.List(suite.ctx, "/") - if len(files) > 0 { - c.Fatalf("Storage driver did not clean up properly. 
Offending files: %#v", files) - } -} - -// TestRootExists ensures that all storage drivers have a root path by default. -func (suite *DriverSuite) TestRootExists(c *check.C) { - _, err := suite.StorageDriver.List(suite.ctx, "/") - if err != nil { - c.Fatalf(`the root path "/" should always exist: %v`, err) - } -} - -// TestValidPaths checks that various valid file paths are accepted by the -// storage driver. -func (suite *DriverSuite) TestValidPaths(c *check.C) { - contents := randomContents(64) - validFiles := []string{ - "/a", - "/2", - "/aa", - "/a.a", - "/0-9/abcdefg", - "/abcdefg/z.75", - "/abc/1.2.3.4.5-6_zyx/123.z/4", - "/docker/docker-registry", - "/123.abc", - "/abc./abc", - "/.abc", - "/a--b", - "/a-.b", - "/_.abc", - "/Docker/docker-registry", - "/Abc/Cba"} - - for _, filename := range validFiles { - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) - } -} - -// TestInvalidPaths checks that various invalid file paths are rejected by the -// storage driver. -func (suite *DriverSuite) TestInvalidPaths(c *check.C) { - contents := randomContents(64) - invalidFiles := []string{ - "", - "/", - "abc", - "123.abc", - "//bcd", - "/abc_123/"} - - for _, filename := range invalidFiles { - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - } -} - -// TestWriteRead1 tests a simple write-read workflow. -func (suite *DriverSuite) TestWriteRead1(c *check.C) { - filename := randomPath(32) - contents := []byte("a") - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead2 tests a simple write-read workflow with unicode data. -func (suite *DriverSuite) TestWriteRead2(c *check.C) { - filename := randomPath(32) - contents := []byte("\xc3\x9f") - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead3 tests a simple write-read workflow with a small string. -func (suite *DriverSuite) TestWriteRead3(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteRead4 tests a simple write-read workflow with 1MB of data. -func (suite *DriverSuite) TestWriteRead4(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompare(c, filename, contents) -} - -// TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage -// driver safely. -func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { - filename := randomPath(32) - contents := []byte{0x80, 0x80, 0x80, 0x80} - suite.writeReadCompare(c, filename, contents) -} - -// TestTruncate tests that putting smaller contents than an original file does -// remove the excess contents. 
-func (suite *DriverSuite) TestTruncate(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompare(c, filename, contents) - - contents = randomContents(1024) - suite.writeReadCompare(c, filename, contents) -} - -// TestReadNonexistent tests reading content from an empty path. -func (suite *DriverSuite) TestReadNonexistent(c *check.C) { - filename := randomPath(32) - _, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestWriteReadStreams1 tests a simple write-read streaming workflow. -func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { - filename := randomPath(32) - contents := []byte("a") - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams2 tests a simple write-read streaming workflow with -// unicode data. -func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { - filename := randomPath(32) - contents := []byte("\xc3\x9f") - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams3 tests a simple write-read streaming workflow with a -// small amount of data. -func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB -// of data. -func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { - filename := randomPath(32) - contents := randomContents(1024 * 1024) - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the -// storage driver safely. -func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { - filename := randomPath(32) - contents := []byte{0x80, 0x80, 0x80, 0x80} - suite.writeReadCompareStreams(c, filename, contents) -} - -// TestWriteReadLargeStreams tests that a 5GB file may be written to the storage -// driver safely. -func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { - if testing.Short() { - c.Skip("Skipping test in short mode") - } - - filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - checksum := sha1.New() - var fileSize int64 = 5 * 1024 * 1024 * 1024 - - contents := newRandReader(fileSize) - written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum)) - c.Assert(err, check.IsNil) - c.Assert(written, check.Equals, fileSize) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - writtenChecksum := sha1.New() - io.Copy(writtenChecksum, reader) - - c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) -} - -// TestReadStreamWithOffset tests that the appropriate data is streamed when -// reading with a given offset. 
-func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { - filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - chunkSize := int64(32) - - contentsChunk1 := randomContents(chunkSize) - contentsChunk2 := randomContents(chunkSize) - contentsChunk3 := randomContents(chunkSize) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) - c.Assert(err, check.IsNil) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) - - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err = ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) - - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err = ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contentsChunk3) - - // Ensure we get invalid offest for negative offsets. - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) - c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) - c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) - c.Assert(reader, check.IsNil) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - // Read past the end of the content and make sure we get a reader that - // returns 0 bytes and io.EOF - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3) - c.Assert(err, check.IsNil) - defer reader.Close() - - buf := make([]byte, chunkSize) - n, err := reader.Read(buf) - c.Assert(err, check.Equals, io.EOF) - c.Assert(n, check.Equals, 0) - - // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF. - reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1) - c.Assert(err, check.IsNil) - defer reader.Close() - - n, err = reader.Read(buf) - c.Assert(n, check.Equals, 1) - - // We don't care whether the io.EOF comes on the this read or the first - // zero read, but the only error acceptable here is io.EOF. - if err != nil { - c.Assert(err, check.Equals, io.EOF) - } - - // Any more reads should result in zero bytes and io.EOF - n, err = reader.Read(buf) - c.Assert(n, check.Equals, 0) - c.Assert(err, check.Equals, io.EOF) -} - -// TestContinueStreamAppendLarge tests that a stream write can be appended to without -// corrupting the data with a large chunk size. -func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) { - suite.testContinueStreamAppend(c, int64(10*1024*1024)) -} - -// TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only -// with a tiny chunk size in order to test corner cases for some cloud storage drivers. 
-func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { - suite.testContinueStreamAppend(c, int64(32)) -} - -func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { - filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - contentsChunk1 := randomContents(chunkSize) - contentsChunk2 := randomContents(chunkSize) - contentsChunk3 := randomContents(chunkSize) - contentsChunk4 := randomContents(chunkSize) - zeroChunk := make([]byte, int64(chunkSize)) - - fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) - - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk1))) - - fi, err := suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, 2*chunkSize) - - // Test re-writing the last chunk - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contentsChunk2))) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, 2*chunkSize) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():]))) - - received, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, fullContents) - - // Writing past size of file extends file (no offset error). We would like - // to write chunk 4 one chunk length past chunk 3. It should be successful - // and the resulting file will be 5 chunks long, with a chunk of all - // zeros. - - fullContents = append(fullContents, zeroChunk...) - fullContents = append(fullContents, contentsChunk4...) - - nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, chunkSize) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Size(), check.Equals, int64(len(fullContents))) - - received, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(len(received), check.Equals, len(fullContents)) - c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk) - c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4) - c.Assert(received, check.DeepEquals, fullContents) - - // Ensure that negative offsets return correct error. 
- nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) - c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) - c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestReadNonexistentStream tests that reading a stream for a nonexistent path -// fails. -func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { - filename := randomPath(32) - - _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestList checks the returned list of keys after populating a directory tree. -func (suite *DriverSuite) TestList(c *check.C) { - rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) - defer suite.StorageDriver.Delete(suite.ctx, rootDirectory) - - doesnotexist := path.Join(rootDirectory, "nonexistent") - _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) - c.Assert(err, check.Equals, storagedriver.PathNotFoundError{ - Path: doesnotexist, - DriverName: suite.StorageDriver.Name(), - }) - - parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) - childFiles := make([]string, 50) - for i := 0; i < len(childFiles); i++ { - childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) - childFiles[i] = childFile - err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) - c.Assert(err, check.IsNil) - } - sort.Strings(childFiles) - - keys, err := suite.StorageDriver.List(suite.ctx, "/") - c.Assert(err, check.IsNil) - c.Assert(keys, check.DeepEquals, []string{rootDirectory}) - - keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) - c.Assert(err, check.IsNil) - c.Assert(keys, check.DeepEquals, []string{parentDirectory}) - - keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) - c.Assert(err, check.IsNil) - - sort.Strings(keys) - c.Assert(keys, check.DeepEquals, childFiles) - - // A few checks to add here (check out #819 for more discussion on this): - // 1. Ensure that all paths are absolute. - // 2. Ensure that listings only include direct children. - // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). -} - -// TestMove checks that a moved object no longer exists at the source path and -// does exist at the destination. 
-func (suite *DriverSuite) TestMove(c *check.C) { - contents := randomContents(32) - sourcePath := randomPath(32) - destPath := randomPath(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) - - _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestMoveOverwrite checks that a moved object no longer exists at the source -// path and overwrites the contents at the destination. -func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { - sourcePath := randomPath(32) - destPath := randomPath(32) - sourceContents := randomContents(32) - destContents := randomContents(64) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(sourcePath)) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.IsNil) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, sourceContents) - - _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestMoveNonexistent checks that moving a nonexistent key fails and does not -// delete the data at the destination path. -func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { - contents := randomContents(32) - sourcePath := randomPath(32) - destPath := randomPath(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(destPath)) - - err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) - c.Assert(err, check.IsNil) - c.Assert(received, check.DeepEquals, contents) -} - -// TestMoveInvalid provides various checks for invalid moves. -func (suite *DriverSuite) TestMoveInvalid(c *check.C) { - contents := randomContents(32) - - // Create a regular file. - err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) - c.Assert(err, check.IsNil) - defer suite.StorageDriver.Delete(suite.ctx, "/notadir") - - // Now try to move a non-existent file under it. 
- err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") - c.Assert(err, check.NotNil) // non-nil error -} - -// TestDelete checks that the delete operation removes data from the storage -// driver -func (suite *DriverSuite) TestDelete(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, filename) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestURLFor checks that the URLFor method functions properly, but only if it -// is implemented -func (suite *DriverSuite) TestURLFor(c *check.C) { - filename := randomPath(32) - contents := randomContents(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) - if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { - return - } - c.Assert(err, check.IsNil) - - response, err := http.Get(url) - c.Assert(err, check.IsNil) - defer response.Body.Close() - - read, err := ioutil.ReadAll(response.Body) - c.Assert(err, check.IsNil) - c.Assert(read, check.DeepEquals, contents) - - url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) - if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { - return - } - c.Assert(err, check.IsNil) - - response, err = http.Head(url) - c.Assert(response.StatusCode, check.Equals, 200) - c.Assert(response.ContentLength, check.Equals, int64(32)) -} - -// TestDeleteNonexistent checks that removing a nonexistent key fails. -func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { - filename := randomPath(32) - err := suite.StorageDriver.Delete(suite.ctx, filename) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestDeleteFolder checks that deleting a folder removes all child elements. 
-func (suite *DriverSuite) TestDeleteFolder(c *check.C) { - dirname := randomPath(32) - filename1 := randomPath(32) - filename2 := randomPath(32) - filename3 := randomPath(32) - contents := randomContents(32) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirname)) - - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) - c.Assert(err, check.IsNil) - - err = suite.StorageDriver.Delete(suite.ctx, dirname) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - - _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) -} - -// TestStatCall runs verifies the implementation of the storagedriver's Stat call. -func (suite *DriverSuite) TestStatCall(c *check.C) { - content := randomContents(4096) - dirPath := randomPath(32) - fileName := randomFilename(32) - filePath := path.Join(dirPath, fileName) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(dirPath)) - - // Call on non-existent file/dir, check error. 
- fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - c.Assert(fi, check.IsNil) - - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.NotNil) - c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) - c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) - c.Assert(fi, check.IsNil) - - err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) - c.Assert(err, check.IsNil) - - // Call on regular file, check results - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Path(), check.Equals, filePath) - c.Assert(fi.Size(), check.Equals, int64(len(content))) - c.Assert(fi.IsDir(), check.Equals, false) - createdTime := fi.ModTime() - - // Sleep and modify the file - time.Sleep(time.Second * 10) - content = randomContents(4096) - err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) - c.Assert(err, check.IsNil) - fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency) - - // Check if the modification time is after the creation time. - // In case of cloud storage services, storage frontend nodes might have - // time drift between them, however that should be solved with sleeping - // before update. - modTime := fi.ModTime() - if !modTime.After(createdTime) { - c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime) - } - - // Call on directory (do not check ModTime as dirs don't need to support it) - fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath) - c.Assert(err, check.IsNil) - c.Assert(fi, check.NotNil) - c.Assert(fi.Path(), check.Equals, dirPath) - c.Assert(fi.Size(), check.Equals, int64(0)) - c.Assert(fi.IsDir(), check.Equals, true) -} - -// TestPutContentMultipleTimes checks that if storage driver can overwrite the content -// in the subsequent puts. Validates that PutContent does not have to work -// with an offset like WriteStream does and overwrites the file entirely -// rather than writing the data to the [0,len(data)) of the file. -func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { - filename := randomPath(32) - contents := randomContents(4096) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - contents = randomContents(2048) // upload a different, smaller file - err = suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contents) -} - -// TestConcurrentStreamReads checks that multiple clients can safely read from -// the same file simultaneously with various offsets. 
-func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { - var filesize int64 = 128 * 1024 * 1024 - - if testing.Short() { - filesize = 10 * 1024 * 1024 - c.Log("Reducing file size to 10MB for short mode") - } - - filename := randomPath(32) - contents := randomContents(filesize) - - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - var wg sync.WaitGroup - - readContents := func() { - defer wg.Done() - offset := rand.Int63n(int64(len(contents))) - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) - c.Assert(err, check.IsNil) - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - c.Assert(readContents, check.DeepEquals, contents[offset:]) - } - - wg.Add(10) - for i := 0; i < 10; i++ { - go readContents() - } - wg.Wait() -} - -// TestConcurrentFileStreams checks that multiple *os.File objects can be passed -// in to WriteStream concurrently without hanging. -func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { - numStreams := 32 - - if testing.Short() { - numStreams = 8 - c.Log("Reducing number of streams to 8 for short mode") - } - - var wg sync.WaitGroup - - testStream := func(size int64) { - defer wg.Done() - suite.testFileStreams(c, size) - } - - wg.Add(numStreams) - for i := numStreams; i > 0; i-- { - go testStream(int64(numStreams) * 1024 * 1024) - } - - wg.Wait() -} - -// TestEventualConsistency checks that if stat says that a file is a certain size, then -// you can freely read from the file (this is the only guarantee that the driver needs to provide) -func (suite *DriverSuite) TestEventualConsistency(c *check.C) { - if testing.Short() { - c.Skip("Skipping test in short mode") - } - - filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - var offset int64 - var misswrites int - var chunkSize int64 = 32 - - for i := 0; i < 1024; i++ { - contents := randomContents(chunkSize) - read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents)) - c.Assert(err, check.IsNil) - - fi, err := suite.StorageDriver.Stat(suite.ctx, filename) - c.Assert(err, check.IsNil) - - // We are most concerned with being able to read data as soon as Stat declares - // it is uploaded. This is the strongest guarantee that some drivers (that guarantee - // at best eventual consistency) absolutely need to provide. 
- if fi.Size() == offset+chunkSize { - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) - c.Assert(err, check.IsNil) - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) - - reader.Close() - offset += read - } else { - misswrites++ - } - } - - if misswrites > 0 { - c.Log("There were " + string(misswrites) + " occurences of a write not being instantly available.") - } - - c.Assert(misswrites, check.Not(check.Equals), 1024) -} - -// BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files -func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 0) -} - -// BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files -func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024) -} - -// BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files -func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024*1024) -} - -// BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files -func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) { - suite.benchmarkPutGetFiles(c, 1024*1024*1024) -} - -func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { - c.SetBytes(size) - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := 0; i < c.N; i++ { - filename := path.Join(parentDir, randomPath(32)) - err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size)) - c.Assert(err, check.IsNil) - - _, err = suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - } -} - -// BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files -func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 0) -} - -// BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files -func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024) -} - -// BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files -func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024*1024) -} - -// BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files -func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { - suite.benchmarkStreamFiles(c, 1024*1024*1024) -} - -func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { - c.SetBytes(size) - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := 0; i < c.N; i++ { - filename := path.Join(parentDir, randomPath(32)) - written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size))) - c.Assert(err, check.IsNil) - c.Assert(written, check.Equals, size) - - rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - rc.Close() - } -} - -// BenchmarkList5Files benchmarks List for 5 small files -func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { - suite.benchmarkListFiles(c, 5) -} - -// BenchmarkList50Files benchmarks List for 50 small files -func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { - suite.benchmarkListFiles(c, 50) -} - -func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) 
{ - parentDir := randomPath(8) - defer func() { - c.StopTimer() - suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - }() - - for i := int64(0); i < numFiles; i++ { - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) - c.Assert(err, check.IsNil) - } - - c.ResetTimer() - for i := 0; i < c.N; i++ { - files, err := suite.StorageDriver.List(suite.ctx, parentDir) - c.Assert(err, check.IsNil) - c.Assert(int64(len(files)), check.Equals, numFiles) - } -} - -// BenchmarkDelete5Files benchmarks Delete for 5 small files -func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { - suite.benchmarkDeleteFiles(c, 5) -} - -// BenchmarkDelete50Files benchmarks Delete for 50 small files -func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { - suite.benchmarkDeleteFiles(c, 50) -} - -func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { - for i := 0; i < c.N; i++ { - parentDir := randomPath(8) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - - c.StopTimer() - for j := int64(0); j < numFiles; j++ { - err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) - c.Assert(err, check.IsNil) - } - c.StartTimer() - - // This is the operation we're benchmarking - err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) - c.Assert(err, check.IsNil) - } -} - -func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { - tf, err := ioutil.TempFile("", "tf") - c.Assert(err, check.IsNil) - defer os.Remove(tf.Name()) - defer tf.Close() - - filename := randomPath(32) - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - contents := randomContents(size) - - _, err = tf.Write(contents) - c.Assert(err, check.IsNil) - - tf.Sync() - tf.Seek(0, os.SEEK_SET) - - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, size) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) - c.Assert(err, check.IsNil) - - readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { - defer suite.StorageDriver.Delete(suite.ctx, firstPart(filename)) - - nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) - c.Assert(err, check.IsNil) - c.Assert(nn, check.Equals, int64(len(contents))) - - reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) - c.Assert(err, check.IsNil) - defer reader.Close() - - readContents, err := ioutil.ReadAll(reader) - c.Assert(err, check.IsNil) - - c.Assert(readContents, check.DeepEquals, contents) -} - -var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") -var separatorChars = []byte("._-") - -func randomPath(length int64) string { - path := "/" - for int64(len(path)) < length { - chunkLength := rand.Int63n(length-int64(len(path))) + 1 - chunk := randomFilename(chunkLength) - 
path += chunk - remaining := length - int64(len(path)) - if remaining == 1 { - path += randomFilename(1) - } else if remaining > 1 { - path += "/" - } - } - return path -} - -func randomFilename(length int64) string { - b := make([]byte, length) - wasSeparator := true - for i := range b { - if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { - b[i] = separatorChars[rand.Intn(len(separatorChars))] - wasSeparator = true - } else { - b[i] = filenameChars[rand.Intn(len(filenameChars))] - wasSeparator = false - } - } - return string(b) -} - -// randomBytes pre-allocates all of the memory sizes needed for the test. If -// anything panics while accessing randomBytes, just make this number bigger. -var randomBytes = make([]byte, 96<<20) - -func init() { - // increase the random bytes to the required maximum - for i := range randomBytes { - randomBytes[i] = byte(rand.Intn(2 << 8)) - } -} - -func randomContents(length int64) []byte { - return randomBytes[:length] -} - -type randReader struct { - r int64 - m sync.Mutex -} - -func (rr *randReader) Read(p []byte) (n int, err error) { - rr.m.Lock() - defer rr.m.Unlock() - - n = copy(p, randomContents(int64(len(p)))) - rr.r -= int64(n) - - if rr.r <= 0 { - err = io.EOF - } - - return -} - -func newRandReader(n int64) *randReader { - return &randReader{r: n} -} - -func firstPart(filePath string) string { - if filePath == "" { - return "/" - } - for { - if filePath[len(filePath)-1] == '/' { - filePath = filePath[:len(filePath)-1] - } - - dir, file := path.Split(filePath) - if dir == "" && file == "" { - return "/" - } - if dir == "/" || dir == "" { - return "/" + file - } - if file == "" { - return dir - } - filePath = dir - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go index b3a5f5203e37..3b06c81790ca 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filereader.go @@ -119,7 +119,7 @@ func (fr *fileReader) reader() (io.Reader, error) { } // If we don't have a reader, open one up. - rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset) + rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go deleted file mode 100644 index 529fa6736535..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter.go +++ /dev/null @@ -1,180 +0,0 @@ -package storage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" -) - -const ( - fileWriterBufferSize = 5 << 20 -) - -// fileWriter implements a remote file writer backed by a storage driver. 
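For context on the storage-driver API these storage-layer files now target: the filereader.go hunk above moves reads from ReadStream to the driver's Reader method, and the linkedblobstore.go hunk later in this diff moves uploads from the deleted fileWriter below to the driver's Writer method. A small round-trip sketch under stated assumptions: the inmemory driver and the path/content values are illustrative, and the Commit/Close calls on the returned writer are assumed from upstream distribution's FileWriter interface at this revision (they are not shown in these hunks).

    package driverexample

    import (
        "io/ioutil"

        "github.com/docker/distribution/context"
        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    // roundTrip writes content through the driver's Writer and reads it back
    // through Reader, the two entry points this diff migrates the storage
    // layer onto.
    func roundTrip(content []byte) ([]byte, error) {
        ctx := context.Background()
        d := inmemory.New()

        // append=false starts a fresh write at the beginning of the path.
        fw, err := d.Writer(ctx, "/example", false)
        if err != nil {
            return nil, err
        }
        if _, err := fw.Write(content); err != nil {
            return nil, err
        }
        // Commit/Close are assumed from the upstream FileWriter interface.
        if err := fw.Commit(); err != nil {
            return nil, err
        }
        if err := fw.Close(); err != nil {
            return nil, err
        }

        rc, err := d.Reader(ctx, "/example", 0)
        if err != nil {
            return nil, err
        }
        defer rc.Close()
        return ioutil.ReadAll(rc)
    }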
-type fileWriter struct { - driver storagedriver.StorageDriver - - ctx context.Context - - // identifying fields - path string - - // mutable fields - size int64 // size of the file, aka the current end - offset int64 // offset is the current write offset - err error // terminal error, if set, reader is closed -} - -type bufferedFileWriter struct { - fileWriter - bw *bufio.Writer -} - -// fileWriterInterface makes the desired io compliant interface that the -// filewriter should implement. -type fileWriterInterface interface { - io.WriteSeeker - io.ReaderFrom - io.Closer -} - -var _ fileWriterInterface = &fileWriter{} - -// newFileWriter returns a prepared fileWriter for the driver and path. This -// could be considered similar to an "open" call on a regular filesystem. -func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { - fw := fileWriter{ - driver: driver, - path: path, - ctx: ctx, - } - - if fi, err := driver.Stat(ctx, path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - return nil, err - } - } else { - if fi.IsDir() { - return nil, fmt.Errorf("cannot write to a directory") - } - - fw.size = fi.Size() - } - - buffered := bufferedFileWriter{ - fileWriter: fw, - } - buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize) - - return &buffered, nil -} - -// wraps the fileWriter.Write method to buffer small writes -func (bfw *bufferedFileWriter) Write(p []byte) (int, error) { - return bfw.bw.Write(p) -} - -// wraps fileWriter.Close to ensure the buffer is flushed -// before we close the writer. -func (bfw *bufferedFileWriter) Close() (err error) { - if err = bfw.Flush(); err != nil { - return err - } - err = bfw.fileWriter.Close() - return err -} - -// wraps fileWriter.Seek to ensure offset is handled -// correctly in respect to pending data in the buffer -func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) { - if err := bfw.Flush(); err != nil { - return 0, err - } - return bfw.fileWriter.Seek(offset, whence) -} - -// wraps bufio.Writer.Flush to allow intermediate flushes -// of the bufferedFileWriter -func (bfw *bufferedFileWriter) Flush() error { - return bfw.bw.Flush() -} - -// Write writes the buffer p at the current write offset. -func (fw *fileWriter) Write(p []byte) (n int, err error) { - nn, err := fw.ReadFrom(bytes.NewReader(p)) - return int(nn), err -} - -// ReadFrom reads reader r until io.EOF writing the contents at the current -// offset. -func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { - if fw.err != nil { - return 0, fw.err - } - - nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r) - - // We should forward the offset, whether or not there was an error. - // Basically, we keep the filewriter in sync with the reader's head. If an - // error is encountered, the whole thing should be retried but we proceed - // from an expected offset, even if the data didn't make it to the - // backend. - fw.offset += nn - - if fw.offset > fw.size { - fw.size = fw.offset - } - - return nn, err -} - -// Seek moves the write position do the requested offest based on the whence -// argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET. 
-func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) { - if fw.err != nil { - return 0, fw.err - } - - var err error - newOffset := fw.offset - - switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fw.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) - } - - if newOffset < 0 { - err = fmt.Errorf("cannot seek to negative position") - } else { - // No problems, set the offset. - fw.offset = newOffset - } - - return fw.offset, err -} - -// Close closes the fileWriter for writing. -// Calling it once is valid and correct and it will -// return a nil error. Calling it subsequent times will -// detect that fw.err has been set and will return the error. -func (fw *fileWriter) Close() error { - if fw.err != nil { - return fw.err - } - - fw.err = fmt.Errorf("filewriter@%v: closed", fw.path) - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go deleted file mode 100644 index 858b03272689..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/filewriter_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/rand" - "io" - "os" - "testing" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -// TestSimpleWrite takes the fileWriter through common write operations -// ensuring data integrity. -func TestSimpleWrite(t *testing.T) { - content := make([]byte, 1<<20) - n, err := rand.Read(content) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - - if n != len(content) { - t.Fatalf("random read did't fill buffer") - } - - dgst, err := digest.FromReader(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error digesting random content: %v", err) - } - - driver := inmemory.New() - path := "/random" - ctx := context.Background() - - fw, err := newFileWriter(ctx, driver, path) - if err != nil { - t.Fatalf("unexpected error creating fileWriter: %v", err) - } - defer fw.Close() - - n, err = fw.Write(content) - if err != nil { - t.Fatalf("unexpected error writing content: %v", err) - } - fw.Flush() - - if n != len(content) { - t.Fatalf("unexpected write length: %d != %d", n, len(content)) - } - - fr, err := newFileReader(ctx, driver, path, int64(len(content))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } - - // Check the seek position is equal to the content length - end, err := fw.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if end != int64(len(content)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(content)) - } - - // Double the content - doubled := append(content, content...) 
- doubledgst, err := digest.FromReader(bytes.NewReader(doubled)) - if err != nil { - t.Fatalf("unexpected error digesting doubled content: %v", err) - } - - nn, err := fw.ReadFrom(bytes.NewReader(content)) - if err != nil { - t.Fatalf("unexpected error doubling content: %v", err) - } - - if nn != int64(len(content)) { - t.Fatalf("writeat was short: %d != %d", n, len(content)) - } - - fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err = digest.NewDigestVerifier(doubledgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } - - // Check that Write updated the offset. - end, err = fw.Seek(0, os.SEEK_END) - if err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - if end != int64(len(doubled)) { - t.Fatalf("write did not advance offset: %d != %d", end, len(doubled)) - } - - // Now, we copy from one path to another, running the data through the - // fileReader to fileWriter, rather than the driver.Move command to ensure - // everything is working correctly. - fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - fw, err = newFileWriter(ctx, driver, "/copied") - if err != nil { - t.Fatalf("unexpected error creating fileWriter: %v", err) - } - defer fw.Close() - - nn, err = io.Copy(fw, fr) - if err != nil { - t.Fatalf("unexpected error copying data: %v", err) - } - - if nn != int64(len(doubled)) { - t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) - } - - fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled))) - if err != nil { - t.Fatalf("unexpected error creating fileReader: %v", err) - } - defer fr.Close() - - verifier, err = digest.NewDigestVerifier(doubledgst) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - io.Copy(verifier, fr) - - if !verifier.Verified() { - t.Fatalf("unable to verify write data") - } -} - -func TestBufferedFileWriter(t *testing.T) { - ctx := context.Background() - writer, err := newFileWriter(ctx, inmemory.New(), "/random") - - if err != nil { - t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) - } - - // write one byte and ensure the offset hasn't been incremented. 
- // offset will only get incremented when the buffer gets flushed - short := []byte{byte(1)} - - writer.Write(short) - - if writer.offset > 0 { - t.Fatalf("WriteStream called prematurely") - } - - // write enough data to cause the buffer to flush and confirm - // the offset has been incremented - long := make([]byte, fileWriterBufferSize) - _, err = rand.Read(long) - if err != nil { - t.Fatalf("unexpected error building random data: %v", err) - } - for i := range long { - long[i] = byte(i) - } - writer.Write(long) - writer.Close() - if writer.offset != (fileWriterBufferSize + 1) { - t.Fatalf("WriteStream not called when buffer capacity reached") - } -} - -func BenchmarkFileWriter(b *testing.B) { - b.StopTimer() // not sure how long setup above will take - for i := 0; i < b.N; i++ { - // Start basic fileWriter initialization - fw := fileWriter{ - driver: inmemory.New(), - path: "/random", - } - ctx := context.Background() - if fi, err := fw.driver.Stat(ctx, fw.path); err != nil { - switch err := err.(type) { - case storagedriver.PathNotFoundError: - // ignore, offset is zero - default: - b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) - } - } else { - if fi.IsDir() { - b.Fatalf("Cannot write to a directory") - } - - fw.size = fi.Size() - } - - randomBytes := make([]byte, 1<<20) - _, err := rand.Read(randomBytes) - if err != nil { - b.Fatalf("unexpected error building random data: %v", err) - } - // End basic file writer initialization - - b.StartTimer() - for j := 0; j < 100; j++ { - fw.Write(randomBytes) - } - b.StopTimer() - } -} - -func BenchmarkBufferedFileWriter(b *testing.B) { - b.StopTimer() // not sure how long setup above will take - ctx := context.Background() - for i := 0; i < b.N; i++ { - bfw, err := newFileWriter(ctx, inmemory.New(), "/random") - - if err != nil { - b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) - } - - randomBytes := make([]byte, 1<<20) - _, err = rand.Read(randomBytes) - if err != nil { - b.Fatalf("unexpected error building random data: %v", err) - } - - b.StartTimer() - for j := 0; j < 100; j++ { - bfw.Write(randomBytes) - } - b.StopTimer() - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/garbagecollect.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/garbagecollect.go new file mode 100644 index 000000000000..be64b847401a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/garbagecollect.go @@ -0,0 +1,150 @@ +package storage + +import ( + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" +) + +func emit(format string, a ...interface{}) { + fmt.Printf(format+"\n", a...) 
+} + +// MarkAndSweep performs a mark and sweep of registry data +func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error { + repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) + if !ok { + return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") + } + + // mark + markSet := make(map[digest.Digest]struct{}) + err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { + if dryRun { + emit(repoName) + } + + var err error + named, err := reference.ParseNamed(repoName) + if err != nil { + return fmt.Errorf("failed to parse repo name %s: %v", repoName, err) + } + repository, err := registry.Repository(ctx, named) + if err != nil { + return fmt.Errorf("failed to construct repository: %v", err) + } + + manifestService, err := repository.Manifests(ctx) + if err != nil { + return fmt.Errorf("failed to construct manifest service: %v", err) + } + + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator") + } + + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + // Mark the manifest's blob + if dryRun { + emit("%s: marking manifest %s ", repoName, dgst) + } + markSet[dgst] = struct{}{} + + manifest, err := manifestService.Get(ctx, dgst) + if err != nil { + return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err) + } + + descriptors := manifest.References() + for _, descriptor := range descriptors { + markSet[descriptor.Digest] = struct{}{} + if dryRun { + emit("%s: marking blob %s", repoName, descriptor.Digest) + } + } + + switch manifest.(type) { + case *schema1.SignedManifest: + signaturesGetter, ok := manifestService.(distribution.SignaturesGetter) + if !ok { + return fmt.Errorf("unable to convert ManifestService into SignaturesGetter") + } + signatures, err := signaturesGetter.GetSignatures(ctx, dgst) + if err != nil { + return fmt.Errorf("failed to get signatures for signed manifest: %v", err) + } + for _, signatureDigest := range signatures { + if dryRun { + emit("%s: marking signature %s", repoName, signatureDigest) + } + markSet[signatureDigest] = struct{}{} + } + break + case *schema2.DeserializedManifest: + config := manifest.(*schema2.DeserializedManifest).Config + if dryRun { + emit("%s: marking configuration %s", repoName, config.Digest) + } + markSet[config.Digest] = struct{}{} + break + } + + return nil + }) + + if err != nil { + // In certain situations such as unfinished uploads, deleting all + // tags in S3 or removing the _manifests folder manually, this + // error may be of type PathNotFound. + // + // In these cases we can continue marking other manifests safely. + if _, ok := err.(driver.PathNotFoundError); ok { + return nil + } + } + + return err + }) + + if err != nil { + return fmt.Errorf("failed to mark: %v\n", err) + } + + // sweep + blobService := registry.Blobs() + deleteSet := make(map[digest.Digest]struct{}) + err = blobService.Enumerate(ctx, func(dgst digest.Digest) error { + // check if digest is in markSet. If not, delete it! 
+ if _, ok := markSet[dgst]; !ok { + deleteSet[dgst] = struct{}{} + } + return nil + }) + if err != nil { + return fmt.Errorf("error enumerating blobs: %v", err) + } + if dryRun { + emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) + } + // Construct vacuum + vacuum := NewVacuum(ctx, storageDriver) + for dgst := range deleteSet { + if dryRun { + emit("blob eligible for deletion: %s", dgst) + continue + } + err = vacuum.RemoveBlob(string(dgst)) + if err != nil { + return fmt.Errorf("failed to delete blob %s: %v\n", dgst, err) + } + } + + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/garbagecollect_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/garbagecollect_test.go new file mode 100644 index 000000000000..a0ba154b6501 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/garbagecollect_test.go @@ -0,0 +1,374 @@ +package storage + +import ( + "io" + "path" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/docker/distribution/testutil" +) + +type image struct { + manifest distribution.Manifest + manifestDigest digest.Digest + layers map[digest.Digest]io.ReadSeeker +} + +func createRegistry(t *testing.T, driver driver.StorageDriver) distribution.Namespace { + ctx := context.Background() + registry, err := NewRegistry(ctx, driver, EnableDelete) + if err != nil { + t.Fatalf("Failed to construct namespace") + } + return registry +} + +func makeRepository(t *testing.T, registry distribution.Namespace, name string) distribution.Repository { + ctx := context.Background() + + // Initialize a dummy repository + named, err := reference.ParseNamed(name) + if err != nil { + t.Fatalf("Failed to parse name %s: %v", name, err) + } + + repo, err := registry.Repository(ctx, named) + if err != nil { + t.Fatalf("Failed to construct repository: %v", err) + } + return repo +} + +func makeManifestService(t *testing.T, repository distribution.Repository) distribution.ManifestService { + ctx := context.Background() + + manifestService, err := repository.Manifests(ctx) + if err != nil { + t.Fatalf("Failed to construct manifest store: %v", err) + } + return manifestService +} + +func allBlobs(t *testing.T, registry distribution.Namespace) map[digest.Digest]struct{} { + ctx := context.Background() + blobService := registry.Blobs() + allBlobsMap := make(map[digest.Digest]struct{}) + err := blobService.Enumerate(ctx, func(dgst digest.Digest) error { + allBlobsMap[dgst] = struct{}{} + return nil + }) + if err != nil { + t.Fatalf("Error getting all blobs: %v", err) + } + return allBlobsMap +} + +func uploadImage(t *testing.T, repository distribution.Repository, im image) digest.Digest { + // upload layers + err := testutil.UploadBlobs(repository, im.layers) + if err != nil { + t.Fatalf("layer upload failed: %v", err) + } + + // upload manifest + ctx := context.Background() + manifestService := makeManifestService(t, repository) + manifestDigest, err := manifestService.Put(ctx, im.manifest) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + return manifestDigest +} + +func uploadRandomSchema1Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := 
testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema1Manifest(digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func uploadRandomSchema2Image(t *testing.T, repository distribution.Repository) image { + randomLayers, err := testutil.CreateRandomLayers(2) + if err != nil { + t.Fatalf("%v", err) + } + + digests := []digest.Digest{} + for digest := range randomLayers { + digests = append(digests, digest) + } + + manifest, err := testutil.MakeSchema2Manifest(repository, digests) + if err != nil { + t.Fatalf("%v", err) + } + + manifestDigest := uploadImage(t, repository, image{manifest: manifest, layers: randomLayers}) + return image{ + manifest: manifest, + manifestDigest: manifestDigest, + layers: randomLayers, + } +} + +func TestNoDeletionNoEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "palailogos") + manifestService, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + image3 := uploadRandomSchema2Image(t, repo) + + // construct manifestlist for fun. + blobstatter := registry.BlobStatter() + manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ + image1.manifestDigest, image2.manifestDigest}) + if err != nil { + t.Fatalf("Failed to make manifest list: %v", err) + } + + _, err = manifestService.Put(ctx, manifestList) + if err != nil { + t.Fatalf("Failed to add manifest list: %v", err) + } + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // the +1 at the end is for the manifestList + // the first +3 at the end for each manifest's blob + // the second +3 at the end for each manifest's signature/config layer + totalBlobCount := len(image1.layers) + len(image2.layers) + len(image3.layers) + 1 + 3 + 3 + if len(blobs) != totalBlobCount { + t.Fatalf("Garbage collection affected storage") + } +} + +func TestGCWithMissingManifests(t *testing.T) { + ctx := context.Background() + d := inmemory.New() + + registry := createRegistry(t, d) + repo := makeRepository(t, registry, "testrepo") + uploadRandomSchema1Image(t, repo) + + // Simulate a missing _manifests directory + revPath, err := pathFor(manifestRevisionsPathSpec{"testrepo"}) + if err != nil { + t.Fatal(err) + } + + _manifestsPath := path.Dir(revPath) + err = d.Delete(ctx, _manifestsPath) + if err != nil { + t.Fatal(err) + } + + err = MarkAndSweep(context.Background(), d, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + if len(blobs) > 0 { + t.Errorf("unexpected blobs after gc") + } +} + +func TestDeletionHasEffect(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "komnenos") + manifests, err := repo.Manifests(ctx) + + image1 := uploadRandomSchema1Image(t, repo) + image2 := uploadRandomSchema1Image(t, repo) + image3 := 
uploadRandomSchema2Image(t, repo) + + manifests.Delete(ctx, image2.manifestDigest) + manifests.Delete(ctx, image3.manifestDigest) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that the image1 manifest and all the layers are still in blobs + if _, ok := blobs[image1.manifestDigest]; !ok { + t.Fatalf("First manifest is missing") + } + + for layer := range image1.layers { + if _, ok := blobs[layer]; !ok { + t.Fatalf("manifest 1 layer is missing: %v", layer) + } + } + + // check that image2 and image3 layers are not still around + for layer := range image2.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 2 layer is present: %v", layer) + } + } + + for layer := range image3.layers { + if _, ok := blobs[layer]; ok { + t.Fatalf("manifest 3 layer is present: %v", layer) + } + } +} + +func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) { + for d = range digests { + break + } + return +} + +func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) { + for d := range digests { + ds = append(ds, d) + } + return +} + +func TestDeletionWithSharedLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "tzimiskes") + + // Create random layers + randomLayers1, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + randomLayers2, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + // Upload all layers + err = testutil.UploadBlobs(repo, randomLayers1) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + err = testutil.UploadBlobs(repo, randomLayers2) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + // Construct manifests + manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + sharedKey := getAnyKey(randomLayers1) + manifest2, err := testutil.MakeSchema2Manifest(repo, append(getKeys(randomLayers2), sharedKey)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + manifestService := makeManifestService(t, repo) + + // Upload manifests + _, err = manifestService.Put(ctx, manifest1) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + manifestDigest2, err := manifestService.Put(ctx, manifest2) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + // delete + err = manifestService.Delete(ctx, manifestDigest2) + if err != nil { + t.Fatalf("manifest deletion failed: %v", err) + } + + // check that all of the layers in layer 1 are still there + blobs := allBlobs(t, registry) + for dgst := range randomLayers1 { + if _, ok := blobs[dgst]; !ok { + t.Fatalf("random layer 1 blob missing: %v", dgst) + } + } +} + +func TestOrphanBlobDeleted(t *testing.T) { + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "michael_z_doukas") + + digests, err := testutil.CreateRandomLayers(1) + if err != nil { + t.Fatalf("Failed to create random digest: %v", err) + } + + if err = testutil.UploadBlobs(repo, digests); err != nil { + t.Fatalf("Failed to upload blob: %v", err) + } + + // formality to create the necessary directories + 
uploadRandomSchema2Image(t, repo) + + // Run GC + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + + blobs := allBlobs(t, registry) + + // check that orphan blob layers are not still around + for dgst := range digests { + if _, ok := blobs[dgst]; ok { + t.Fatalf("Orphan layer is present: %v", dgst) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go index b55c753d0639..68a347b422b4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -1,7 +1,7 @@ package storage import ( - "io" + "fmt" "net/http" "path" "time" @@ -9,6 +9,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" ) @@ -17,15 +18,12 @@ import ( // repository name and digest. type linkPathFunc func(name string, dgst digest.Digest) (string, error) -// blobsRootPathFunc describes a function that can resolve a root directory of -// blob links based on the repository name. -type blobsRootPathFunc func(name string) (string, error) - // linkedBlobStore provides a full BlobService that namespaces the blobs to a // given repository. Effectively, it manages the links in a given repository // that grant access to the global blob store. type linkedBlobStore struct { *blobStore + registry *registry blobServer distribution.BlobServer blobAccessController distribution.BlobDescriptorService repository distribution.Repository @@ -41,9 +39,8 @@ type linkedBlobStore struct { // treated as the "canonical" link location and will be used for writes. linkPathFns []linkPathFunc - // blobsRootPathFns functions the same way for blob root directories as - // linkPathFns for blob links. - blobsRootPathFns []blobsRootPathFunc + // linkDirectoryPathSpec locates the root directories in which one might find links + linkDirectoryPathSpec pathSpec } var _ distribution.BlobStore = &linkedBlobStore{} @@ -85,10 +82,7 @@ func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter } func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst, err := digest.FromBytes(p) - if err != nil { - return distribution.Descriptor{}, err - } + dgst := digest.FromBytes(p) // Place the data in the blob store first. desc, err := lbs.blobStore.Put(ctx, mediaType, p) if err != nil { @@ -107,15 +101,63 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) return desc, lbs.linkBlob(ctx, desc) } +// createOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type createOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. 
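A minimal caller-side sketch of the cross-repository mount option added here. The helper function, its variable names, and the fmt.Printf call are illustrative assumptions; WithMountFrom, BlobCreateOption, ErrBlobMounted with its From/Descriptor fields, and the options-aware Create signature are taken from the hunks in this diff.

    package mountexample

    import (
        "fmt"

        "github.com/docker/distribution"
        "github.com/docker/distribution/context"
        "github.com/docker/distribution/reference"
        "github.com/docker/distribution/registry/storage"
    )

    // mountOrUpload asks the target repository's blob store to link the blob
    // named by canonicalRef from its source repository. If the mount succeeds,
    // Create returns distribution.ErrBlobMounted and no upload session is
    // needed; otherwise the returned BlobWriter can be used for a normal upload.
    func mountOrUpload(ctx context.Context, blobs distribution.BlobStore, canonicalRef reference.Canonical) (distribution.BlobWriter, error) {
        bw, err := blobs.Create(ctx, storage.WithMountFrom(canonicalRef))
        if ebm, ok := err.(distribution.ErrBlobMounted); ok {
            // Mounted from the source repository; ebm.Descriptor describes the
            // blob that is now linked into this repository.
            fmt.Printf("mounted %s from %s\n", ebm.Descriptor.Digest, ebm.From)
            return nil, nil
        }
        return bw, err
    }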
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*createOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + // Writer begins a blob write session, returning a handle. -func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Create") +func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + + var opts createOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + if opts.Mount.ShouldMount { + desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest()) + if err == nil { + // Mount successful, no need to initiate an upload session + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + } + } uuid := uuid.Generate().String() startedAt := time.Now().UTC() path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Named().Name(), id: uuid, }) @@ -124,7 +166,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter } startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Named().Name(), id: uuid, }) @@ -137,14 +179,14 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter return nil, err } - return lbs.newBlobUpload(ctx, uuid, path, startedAt) + return lbs.newBlobUpload(ctx, uuid, path, startedAt, false) } func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Named().Name(), id: id, }) @@ -168,7 +210,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution } path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Named().Name(), id: id, }) @@ -176,31 +218,115 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution return nil, err } - return lbs.newBlobUpload(ctx, id, path, startedAt) + return lbs.newBlobUpload(ctx, id, path, startedAt, true) } func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Delete") if !lbs.deleteEnabled { return distribution.ErrUnsupported } - return lbs.blobAccessController.Clear(ctx, dgst) + // Ensure the blob is available for deletion + _, err := lbs.blobAccessController.Stat(ctx, dgst) + if err != nil { + return err + } + + err = lbs.blobAccessController.Clear(ctx, dgst) + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error { + rootPath, err := pathFor(lbs.linkDirectoryPathSpec) + if err != nil { + return err + } + err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error { + // exit early if directory... 
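WithMountFrom turns Create into a cross-repository mount attempt: when the source link resolves, Create returns distribution.ErrBlobMounted instead of a writer and no upload session is started. A hedged caller-side sketch; mountOrUpload is a hypothetical helper, and reference.WithDigest is an assumption about the vendored reference package rather than something shown in this diff:

func mountOrUpload(ctx context.Context, dest distribution.BlobStore, src reference.Named, dgst digest.Digest) (distribution.BlobWriter, error) {
	canonical, err := reference.WithDigest(src, dgst) // assumed constructor for a Canonical reference
	if err != nil {
		return nil, err
	}
	bw, err := dest.Create(ctx, WithMountFrom(canonical))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		// The blob was linked from the source repository; nothing to upload.
		context.GetLogger(ctx).Debugf("mounted %v from %v", ebm.Descriptor.Digest, ebm.From)
		return nil, nil
	}
	return bw, err // nil error: proceed with a normal upload session
}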
+ if fileInfo.IsDir() { + return nil + } + filePath := fileInfo.Path() + + // check if it's a link + _, fileName := path.Split(filePath) + if fileName != "link" { + return nil + } + + // read the digest found in link + digest, err := lbs.blobStore.readlink(ctx, filePath) + if err != nil { + return err + } + + // ensure this conforms to the linkPathFns + _, err = lbs.Stat(ctx, digest) + if err != nil { + // we expect this error to occur so we move on + if err == distribution.ErrBlobUnknown { + return nil + } + return err + } + + err = ingestor(digest) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + return err + } + + return nil +} + +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { + repo, err := lbs.registry.Repository(ctx, sourceRepo) + if err != nil { + return distribution.Descriptor{}, err + } + stat, err := repo.Blobs(ctx).Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Size: stat.Size, + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + } + return desc, lbs.linkBlob(ctx, desc) } // newBlobUpload allocates a new upload controller with the given state. -func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { - fw, err := newFileWriter(ctx, lbs.driver, path) +func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) { + fw, err := lbs.driver.Writer(ctx, path, append) if err != nil { return nil, err } bw := &blobWriter{ - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.New(), - bufferedFileWriter: *fw, + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.New(), + fileWriter: fw, + driver: lbs.driver, + path: path, resumableDigestEnabled: lbs.resumableDigestEnabled, } @@ -228,7 +354,7 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution } seenDigests[dgst] = struct{}{} - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) if err != nil { return err } @@ -241,42 +367,6 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution return nil } -func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingest func(digest.Digest) error) error { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Enumerate") - allProcessed := true - - for _, pathFn := range lbs.blobsRootPathFns { - rootPath, err := pathFn(lbs.repository.Name()) - if err != nil { - return err - } - - walkFn, err := makeBlobStoreWalkFunc(rootPath, false, ingest) - if err != nil { - return err - } - - err = WalkSortedChildren(ctx, lbs.driver, rootPath, walkFn) - if err != nil { - switch err.(type) { - case driver.PathNotFoundError: - default: - if err != ErrFinishedWalk { - return err - } - // ErrFinishedWalk meens caller don't want us to continue - allProcessed = false - } - } - } - - if allProcessed { - return io.EOF - } - - return nil -} - type linkedBlobStatter struct { *blobStore repository distribution.Repository @@ -288,18 +378,14 @@ type linkedBlobStatter struct { // removed an the 
blob links folder should be merged. The first entry is // treated as the "canonical" link location and will be used for writes. linkPathFns []linkPathFunc - - // Causes directory containing blob's data to be removed recursively upon - // Clear. - removeParentsOnDelete bool } var _ distribution.BlobDescriptorService = &linkedBlobStatter{} func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { var ( - resolveErr error - target digest.Digest + found bool + target digest.Digest ) // try the many link path functions until we get success or an error that @@ -309,24 +395,24 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) if err == nil { - resolveErr = nil + found = true break // success! } switch err := err.(type) { case driver.PathNotFoundError: - resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error + // do nothing, just move to the next linkPathFn default: return distribution.Descriptor{}, err } } - if resolveErr != nil { - return distribution.Descriptor{}, resolveErr + if !found { + return distribution.Descriptor{}, distribution.ErrBlobUnknown } if target != dgst { - // Track when we are doing cross-digest domain lookups. ie, tarsum to sha256. + // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) } @@ -337,21 +423,14 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis } func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { - // return ErrBlobUnknown if none of the paths exist - resolveErr := distribution.ErrBlobUnknown - // clear any possible existence of a link described in linkPathFns for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) if err != nil { return err } - pth := blobLinkPath - if lbs.removeParentsOnDelete { - pth = path.Dir(blobLinkPath) - } - err = lbs.blobStore.driver.Delete(ctx, pth) + err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) if err != nil { switch err := err.(type) { case driver.PathNotFoundError: @@ -360,17 +439,16 @@ func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (er return err } } - resolveErr = nil } - return resolveErr + return nil } // resolveTargetWithFunc allows us to read a link to a resource with different // linkPathFuncs to let us try a few different paths before returning not // found. func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst) if err != nil { return "", err } @@ -388,18 +466,7 @@ func blobLinkPath(name string, dgst digest.Digest) (string, error) { return pathFor(layerLinkPathSpec{name: name, digest: dgst}) } -// blobsRootPath provides the path to the root of blob links, also known as -// layers. -func blobsRootPath(name string) (string, error) { - return pathFor(layersPathSpec{name: name}) -} - // manifestRevisionLinkPath provides the path to the manifest revision link. 
func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) } - -// manifestRevisionsPath provides the path to the manifest revisions directory. -func manifestRevisionsPath(name string) (string, error) { - return pathFor(manifestRevisionsPathSpec{name: name}) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go new file mode 100644 index 000000000000..42027d1339eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifestlisthandler.go @@ -0,0 +1,96 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" +) + +// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. +type manifestListHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +var _ ManifestHandler = &manifestListHandler{} + +func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + + var m manifestlist.DeserializedManifestList + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + + m, ok := manifestList.(*manifestlist.DeserializedManifestList) + if !ok { + return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to +// store valid content, leaving trust policies of that content up to +// consumers. +func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + // This manifest service is different from the blob service + // returned by Blob. It uses a linked blob store to ensure that + // only manifests are accessible. + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + for _, manifestDescriptor := range mnfst.References() { + exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) + if err != nil && err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + if err != nil || !exists { + // On error here, we always append unknown blob errors. 
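The manifest list handler above stores a list that only references other manifests, and its verifyManifest checks that each referenced manifest already exists in the repository. A sketch of assembling and putting one; manifestlist.FromDescriptors, ManifestDescriptor, and PlatformSpec are assumptions about the vendored manifestlist package, and putManifestList is a hypothetical helper:

func putManifestList(ctx context.Context, ms distribution.ManifestService, amd64, arm64 distribution.Descriptor) (digest.Digest, error) {
	descriptors := []manifestlist.ManifestDescriptor{
		{Descriptor: amd64, Platform: manifestlist.PlatformSpec{Architecture: "amd64", OS: "linux"}},
		{Descriptor: arm64, Platform: manifestlist.PlatformSpec{Architecture: "arm64", OS: "linux"}},
	}
	ml, err := manifestlist.FromDescriptors(descriptors)
	if err != nil {
		return "", err
	}
	// Put dispatches to manifestListHandler.Put, which verifies every referenced
	// manifest exists before linking the list into the repository.
	return ms.Put(ctx, ml)
}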
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go index 223098044455..5a9165f9097c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore.go @@ -2,31 +2,62 @@ package storage import ( "fmt" - "io" + "path" + "encoding/json" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/storage/driver" ) +// A ManifestHandler gets and puts manifests of a particular type. +type ManifestHandler interface { + // Unmarshal unmarshals the manifest from a byte slice. + Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest. + Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) +} + +// SkipLayerVerification allows a manifest to be Put before its +// layers are on the filesystem +func SkipLayerVerification() distribution.ManifestServiceOption { + return skipLayerOption{} +} + +type skipLayerOption struct{} + +func (o skipLayerOption) Apply(m distribution.ManifestService) error { + if ms, ok := m.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifestStore") +} + type manifestStore struct { - repository *repository - revisionStore *revisionStore - tagStore *tagStore - ctx context.Context + repository *repository + blobStore *linkedBlobStore + ctx context.Context + skipDependencyVerification bool - enumerateAllDigests bool + + schema1Handler ManifestHandler + schema2Handler ManifestHandler + manifestListHandler ManifestHandler } var _ distribution.ManifestService = &manifestStore{} -func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { +func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") - _, err := ms.revisionStore.blobStore.Stat(ms.ctx, dgst) + _, err := ms.blobStore.Stat(ms.ctx, dgst) if err != nil { if err == distribution.ErrBlobUnknown { return false, nil @@ -38,158 +69,120 @@ func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { return true, nil } -func (ms *manifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { +func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") - return ms.revisionStore.get(ms.ctx, dgst) -} -// SkipLayerVerification allows a manifest to be Put before it's -// layers are on the filesystem -func SkipLayerVerification(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifestStore); ok { - 
ms.skipDependencyVerification = true - return nil + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. + + content, err := ms.blobStore.Get(ctx, dgst) + if err != nil { + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: ms.repository.Named().Name(), + Revision: dgst, + } + } + + return nil, err } - return fmt.Errorf("skip layer verification only valid for manifestStore") -} -// EnumerateAllDigests causes Enumerate method to include all the digests found -// without checking whether they exist and belong to manifest revisions or not. -func EnumerateAllDigests(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifestStore); ok { - ms.enumerateAllDigests = true - return nil + var versioned manifest.Versioned + if err = json.Unmarshal(content, &versioned); err != nil { + return nil, err + } + + switch versioned.SchemaVersion { + case 1: + return ms.schema1Handler.Unmarshal(ctx, dgst, content) + case 2: + // This can be an image manifest or a manifest list + switch versioned.MediaType { + case schema2.MediaTypeManifest: + return ms.schema2Handler.Unmarshal(ctx, dgst, content) + case manifestlist.MediaTypeManifestList: + return ms.manifestListHandler.Unmarshal(ctx, dgst, content) + default: + return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} + } } - return fmt.Errorf("enumerate all digests only valid for manifeststore") + + return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) } -func (ms *manifestStore) Put(manifest *schema1.SignedManifest) error { +func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") - if err := ms.verifyManifest(ms.ctx, manifest); err != nil { - return err - } - - // Store the revision of the manifest - revision, err := ms.revisionStore.put(ms.ctx, manifest) - if err != nil { - return err + switch manifest.(type) { + case *schema1.SignedManifest: + return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *schema2.DeserializedManifest: + return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *manifestlist.DeserializedManifestList: + return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) } - // Now, tag the manifest - return ms.tagStore.tag(manifest.Tag, revision.Digest) + return "", fmt.Errorf("unrecognized manifest type %T", manifest) } // Delete removes the revision of the specified manfiest. 
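Get above unmarshals by schema version and, for version 2, by media type, so callers receive one of three concrete manifest types; Put dispatches to the matching handler the same way. A small consumer-side sketch (describeManifest is illustrative only):

func describeManifest(ctx context.Context, ms distribution.ManifestService, dgst digest.Digest) (string, error) {
	m, err := ms.Get(ctx, dgst)
	if err != nil {
		return "", err
	}
	switch m.(type) {
	case *schema1.SignedManifest:
		return "schema1 signed manifest", nil
	case *schema2.DeserializedManifest:
		return "schema2 image manifest", nil
	case *manifestlist.DeserializedManifestList:
		return "manifest list", nil
	default:
		return "", fmt.Errorf("unrecognized manifest type %T", m)
	}
}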
-func (ms *manifestStore) Delete(dgst digest.Digest) error { +func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") - return ms.revisionStore.delete(ms.ctx, dgst) -} - -func (ms *manifestStore) Tags() ([]string, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Tags") - return ms.tagStore.tags() -} - -func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).ExistsByTag") - return ms.tagStore.exists(tag) + return ms.blobStore.Delete(ctx, dgst) } -func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - for _, option := range options { - err := option(ms) +func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error { + err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error { + err := ingester(dgst) if err != nil { - return nil, err + return err } - } + return nil + }) + return err +} - context.GetLogger(ms.ctx).Debug("(*manifestStore).GetByTag") - dgst, err := ms.tagStore.resolve(tag) +// Only valid for schema1 signed manifests +func (ms *manifestStore) GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error) { + // sanity check that digest refers to a schema1 digest + manifest, err := ms.Get(ctx, manifestDigest) if err != nil { return nil, err } - return ms.revisionStore.get(ms.ctx, dgst) -} - -// Enumerate retuns an array of digests of all manifest revisions in repository. -// Returned digests may not be resolvable to actual data. -func (ms *manifestStore) Enumerate() ([]digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Enumerate") - dgsts, err := ms.revisionStore.enumerate() - if err != nil && err != io.EOF { - return nil, err - } - if ms.enumerateAllDigests { - return dgsts, nil - } - res := make([]digest.Digest, 0, len(dgsts)) - for _, dgst := range dgsts { - if _, err := ms.Get(dgst); err == nil { - res = append(res, dgst) - } - } - return res, nil -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. 
-func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.SignedManifest) error { - var errs distribution.ErrManifestVerification - - if len(mnfst.Name) > reference.NameTotalLengthMax { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), - }) + if _, ok := manifest.(*schema1.SignedManifest); !ok { + return nil, fmt.Errorf("digest %v is not for schema1 manifest", manifestDigest) } - if !reference.NameRegexp.MatchString(mnfst.Name) { - errs = append(errs, - distribution.ErrManifestNameInvalid{ - Name: mnfst.Name, - Reason: fmt.Errorf("invalid manifest name format"), - }) + signaturesPath, err := pathFor(manifestSignaturesPathSpec{ + name: ms.repository.Named().Name(), + revision: manifestDigest, + }) + if err != nil { + return nil, err } - if len(mnfst.History) != len(mnfst.FSLayers) { - errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", - len(mnfst.History), len(mnfst.FSLayers))) - } + var digests []digest.Digest + alg := string(digest.SHA256) + signaturePaths, err := ms.blobStore.driver.List(ctx, path.Join(signaturesPath, alg)) - if _, err := schema1.Verify(mnfst); err != nil { - switch err { - case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: - errs = append(errs, distribution.ErrManifestUnverified{}) - default: - if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, distribution.ErrManifestUnverified{}) - } else { - errs = append(errs, err) - } - } + switch err.(type) { + case nil: + break + case driver.PathNotFoundError: + // Manifest may have been pushed with signature store disabled + return digests, nil + default: + return nil, err } - if !ms.skipDependencyVerification { - for _, fsLayer := range mnfst.FSLayers { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.BlobSum) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. 
- errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.BlobSum}) - } + for _, sigPath := range signaturePaths { + sigdigest, err := digest.ParseDigest(alg + ":" + path.Base(sigPath)) + if err != nil { + // merely found not a digest + continue } + digests = append(digests, sigdigest) } - if len(errs) != 0 { - return errs - } - - return nil + return digests, nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go index a7c4b0f4a0b0..fcb5adf9ae83 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -23,14 +24,14 @@ type manifestStoreTestEnv struct { driver driver.StorageDriver registry distribution.Namespace repository distribution.Repository - name string + name reference.Named tag string } -func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { +func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string, options ...RegistryOption) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + registry, err := NewRegistry(ctx, driver, options...) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -51,43 +52,32 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE } func TestManifestStorage(t *testing.T) { - env := newManifestStoreTestEnv(t, "foo/bar", "thetag") - ctx := context.Background() - ms, err := env.repository.Manifests(ctx) - if err != nil { - t.Fatal(err) - } + testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) +} - exists, err := ms.ExistsByTag(env.tag) +func TestManifestStorageDisabledSignatures(t *testing.T) { + k, err := libtrust.GenerateECP256PrivateKey() if err != nil { - t.Fatalf("unexpected error checking manifest existence: %v", err) - } - - if exists { - t.Fatalf("manifest should not exist") + t.Fatal(err) } + testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, DisableSchema1Signatures, Schema1SigningKey(k)) +} - dgsts, err := ms.Enumerate() +func testManifestStorage(t *testing.T, options ...RegistryOption) { + repoName, _ := reference.ParseNamed("foo/bar") + env := newManifestStoreTestEnv(t, repoName, "thetag", options...) 
+ ctx := context.Background() + ms, err := env.repository.Manifests(ctx) if err != nil { - t.Errorf("unexpected error enumerating manifest revisions: %v", err) - } else if len(dgsts) != 0 { - t.Errorf("expected exactly 0 manifests, not %d", len(dgsts)) - } - - if _, err := ms.GetByTag(env.tag); true { - switch err.(type) { - case distribution.ErrManifestUnknown: - break - default: - t.Fatalf("expected manifest unknown error: %#v", err) - } + t.Fatal(err) } + equalSignatures := env.registry.(*registry).schema1SignaturesEnabled m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: env.name, + Name: env.name.Name(), Tag: env.tag, } @@ -121,7 +111,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("error signing manifest: %v", err) } - err = ms.Put(sm) + _, err = ms.Put(ctx, sm) if err == nil { t.Fatalf("expected errors putting manifest with full verification") } @@ -157,30 +147,46 @@ func TestManifestStorage(t *testing.T) { } } - if err = ms.Put(sm); err != nil { + var manifestDigest digest.Digest + if manifestDigest, err = ms.Put(ctx, sm); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } - exists, err = ms.ExistsByTag(env.tag) + exists, err := ms.Exists(ctx, manifestDigest) if err != nil { - t.Fatalf("unexpected error checking manifest existence: %v", err) + t.Fatalf("unexpected error checking manifest existence: %#v", err) } if !exists { t.Fatalf("manifest should exist") } - fetchedManifest, err := ms.GetByTag(env.tag) - + fromStore, err := ms.Get(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } - if !reflect.DeepEqual(fetchedManifest, sm) { - t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm) + fetchedManifest, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + + if !bytes.Equal(fetchedManifest.Canonical, sm.Canonical) { + t.Fatalf("fetched payload does not match original payload: %q != %q", fetchedManifest.Canonical, sm.Canonical) } - fetchedJWS, err := libtrust.ParsePrettySignature(fetchedManifest.Raw, "signatures") + if equalSignatures { + if !reflect.DeepEqual(fetchedManifest, sm) { + t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest.Manifest, sm.Manifest) + } + } + + _, pl, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("unexpected error parsing jws: %v", err) } @@ -192,12 +198,9 @@ func TestManifestStorage(t *testing.T) { // Now that we have a payload, take a moment to check that the manifest is // return by the payload digest. 
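Several hunks in this diff, including the one just below, adapt call sites to the vendored digest API change: digest.FromBytes no longer returns an error, and manifest lookups now take a context. A minimal sketch of the new call pattern (manifestExistsByPayload is illustrative only):

func manifestExistsByPayload(ctx context.Context, ms distribution.ManifestService, payload []byte) (bool, error) {
	dgst := digest.FromBytes(payload) // no error return in the v2.4.x digest package
	return ms.Exists(ctx, dgst)
}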
- dgst, err := digest.FromBytes(payload) - if err != nil { - t.Fatalf("error getting manifest digest: %v", err) - } - exists, err = ms.Exists(dgst) + dgst := digest.FromBytes(payload) + exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("error checking manifest existence by digest: %v", err) } @@ -206,73 +209,33 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("manifest %s should exist", dgst) } - fetchedByDigest, err := ms.Get(dgst) + fetchedByDigest, err := ms.Get(ctx, dgst) if err != nil { t.Fatalf("unexpected error fetching manifest by digest: %v", err) } - if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) { - t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest) - } - - sigs, err := fetchedJWS.Signatures() - if err != nil { - t.Fatalf("unable to extract signatures: %v", err) - } - - if len(sigs) != 1 { - t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) + byDigestManifest, ok := fetchedByDigest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") } - // Enumerate only valid manifest revision digests - dgsts, err = ms.Enumerate() - if err != nil { - t.Errorf("unexpected error enumerating manifest revisions: %v", err) - } else if len(dgsts) != 1 { - t.Errorf("expected exactly 1 manifest, not %d", len(dgsts)) - } else if dgsts[0] != dgst { - t.Errorf("got unexpected digest manifest (%s != %s)", dgsts[0], dgst) + if !bytes.Equal(byDigestManifest.Canonical, fetchedManifest.Canonical) { + t.Fatalf("fetched manifest not equal: %q != %q", byDigestManifest.Canonical, fetchedManifest.Canonical) } - // Enumerate all digests - if err := EnumerateAllDigests(ms); err != nil { - t.Fatalf("failed to configure enumeration of all digests: %v", err) - } - dgsts, err = ms.Enumerate() - if err != nil { - t.Errorf("unexpected error enumerating manifest revisions: %v", err) - } else { - // _layers contain 2 links per one tarsum blob - expCount := 1 + len(testLayers)*2 - if len(dgsts) != expCount { - t.Errorf("unexpected number of returned digests (%d != %d)", len(dgsts), expCount) - } - received := make(map[digest.Digest]struct{}) - for _, dgst := range dgsts { - received[dgst] = struct{}{} - } - if _, exists := received[dgst]; !exists { - t.Errorf("expected manifest revision %s to be returned", dgst.String()) - } - for dgst := range testLayers { - if _, exists := received[dgst]; !exists { - t.Errorf("expected layer blob %s to be returned", dgst.String()) - } + if equalSignatures { + if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) { + t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest) } } - // Grabs the tags and check that this tagged manifest is present - tags, err := ms.Tags() + sigs, err := fetchedJWS.Signatures() if err != nil { - t.Fatalf("unexpected error fetching tags: %v", err) - } - - if len(tags) != 1 { - t.Fatalf("unexpected tags returned: %v", tags) + t.Fatalf("unable to extract signatures: %v", err) } - if tags[0] != env.tag { - t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{env.tag}) + if len(sigs) != 1 { + t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) } // Now, push the same manifest with a different key @@ -285,8 +248,12 @@ func TestManifestStorage(t *testing.T) { if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } + _, pl, err = sm2.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } - jws2, err := libtrust.ParsePrettySignature(sm2.Raw, "signatures") + jws2, 
err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -300,15 +267,20 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) } - if err = ms.Put(sm2); err != nil { + if manifestDigest, err = ms.Put(ctx, sm2); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } - fetched, err := ms.GetByTag(env.tag) + fromStore, err = ms.Get(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } + fetched, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected type from signed manifeststore : %T", fetched) + } + if _, err := schema1.Verify(fetched); err != nil { t.Fatalf("unexpected error verifying manifest: %v", err) } @@ -324,7 +296,12 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error getting expected signatures: %v", err) } - receivedJWS, err := libtrust.ParsePrettySignature(fetched.Raw, "signatures") + _, pl, err = fetched.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("unexpected error parsing jws: %v", err) } @@ -338,24 +315,26 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("payloads are not equal") } - receivedSigs, err := receivedJWS.Signatures() - if err != nil { - t.Fatalf("error getting signatures: %v", err) - } + if equalSignatures { + receivedSigs, err := receivedJWS.Signatures() + if err != nil { + t.Fatalf("error getting signatures: %v", err) + } - for i, sig := range receivedSigs { - if !bytes.Equal(sig, expectedSigs[i]) { - t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i])) + for i, sig := range receivedSigs { + if !bytes.Equal(sig, expectedSigs[i]) { + t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i])) + } } } // Test deleting manifests - err = ms.Delete(dgst) + err = ms.Delete(ctx, dgst) if err != nil { t.Fatalf("unexpected an error deleting manifest by digest: %v", err) } - exists, err = ms.Exists(dgst) + exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("Error querying manifest existence") } @@ -363,7 +342,7 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Deleted manifest should not exist") } - deletedManifest, err := ms.Get(dgst) + deletedManifest, err := ms.Get(ctx, dgst) if err == nil { t.Errorf("Unexpected success getting deleted manifest") } @@ -379,12 +358,12 @@ func TestManifestStorage(t *testing.T) { } // Re-upload should restore manifest to a good state - err = ms.Put(sm) + _, err = ms.Put(ctx, sm) if err != nil { t.Errorf("Error re-uploading deleted manifest") } - exists, err = ms.Exists(dgst) + exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("Error querying manifest existence") } @@ -392,7 +371,7 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Restored manifest should exist") } - deletedManifest, err = ms.Get(dgst) + deletedManifest, err = ms.Get(ctx, dgst) if err != nil { t.Errorf("Unexpected error getting manifest") } @@ -412,7 +391,7 @@ func TestManifestStorage(t *testing.T) { if err != nil { t.Fatal(err) } - err = ms.Delete(dgst) + err = ms.Delete(ctx, dgst) if err == nil { t.Errorf("Unexpected success deleting while disabled") } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go 
index c6a11d2f81ea..8985f043f396 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths.go @@ -16,12 +16,6 @@ const ( // storage path root would configurable for all drivers through this // package. In reality, we've found it simpler to do this on a per driver // basis. - - layersDirectory = "_layers" - manifestsDirectory = "_manifests" - uploadsDirectory = "_uploads" - - multilevelHexPrefixLength = 2 ) // pathFor maps paths based on "object names" and their ids. The "object @@ -48,13 +42,13 @@ const ( // data // startedat // hashstates// -// -> blobs/ +// -> blob/ // // // The storage backend layout is broken up into a content-addressable blob // store and repositories. The content-addressable blob store holds most data // throughout the backend, keyed by algorithm and digests of the underlying -// content. Access to the blob store is controled through links from the +// content. Access to the blob store is controlled through links from the // repository to blobstore. // // A repository is made up of layers, manifests and tags. The layers component @@ -80,7 +74,7 @@ const ( // // Manifests: // -// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ +// manifestRevisionsPathSpec: /v2/repositories//_manifests/revisions/ // manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// // manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link // manifestSignaturesPathSpec: /v2/repositories//_manifests/revisions///signatures/ @@ -97,7 +91,6 @@ const ( // // Blobs: // -// layersPathSpec: /v2/repositories//_layers/ // layerLinkPathSpec: /v2/repositories//_layers///link // // Uploads: @@ -108,6 +101,7 @@ const ( // // Blob Store: // +// blobsPathSpec: /v2/blobs/ // blobPathSpec: /v2/blobs/// // blobDataPathSpec: /v2/blobs////data // blobMediaTypePathSpec: /v2/blobs////data @@ -134,19 +128,15 @@ func pathFor(spec pathSpec) (string, error) { switch v := spec.(type) { case manifestRevisionsPathSpec: + return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil - return path.Join(append(repoPrefix, v.name, manifestsDirectory, "revisions")...), nil case manifestRevisionPathSpec: - revisionsPrefix, err := pathFor(manifestRevisionsPathSpec{name: v.name}) - if err != nil { - return "", err - } components, err := digestPathComponents(v.revision, false) if err != nil { return "", err } - return path.Join(append([]string{revisionsPrefix}, components...)...), nil + return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil case manifestRevisionLinkPathSpec: root, err := pathFor(manifestRevisionPathSpec{ name: v.name, @@ -186,7 +176,7 @@ func pathFor(spec pathSpec) (string, error) { return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil case manifestTagsPathSpec: - return path.Join(append(repoPrefix, v.name, manifestsDirectory, "tags")...), nil + return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil case manifestTagPathSpec: root, err := pathFor(manifestTagsPathSpec{ name: v.name, @@ -247,26 +237,23 @@ func pathFor(spec pathSpec) (string, error) { } return path.Join(root, path.Join(components...)), nil - case layersPathSpec: - - return path.Join(append(repoPrefix, v.name, layersDirectory)...), nil case layerLinkPathSpec: - layersPrefix, err := pathFor(layersPathSpec{name: v.name}) - if err != nil { - return "", err - } components, err := 
digestPathComponents(v.digest, false) if err != nil { return "", err } - components = append(components, "link") // TODO(stevvooe): Right now, all blobs are linked under "_layers". If // we have future migrations, we may want to rename this to "_blobs". // A migration strategy would simply leave existing items in place and // write the new paths, commit a file then delete the old files. - return path.Join(append([]string{layersPrefix}, components...)...), nil + blobLinkPathComponents := append(repoPrefix, v.name, "_layers") + + return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil + case blobsPathSpec: + blobsPathPrefix := append(rootPrefix, "blobs") + return path.Join(blobsPathPrefix...), nil case blobPathSpec: components, err := digestPathComponents(v.digest, true) if err != nil { @@ -274,27 +261,27 @@ func pathFor(spec pathSpec) (string, error) { } blobPathPrefix := append(rootPrefix, "blobs") - return path.Join(append(blobPathPrefix, components...)...), nil case blobDataPathSpec: - blobPathPrefix, err := pathFor(blobPathSpec{ - digest: v.digest, - }) + components, err := digestPathComponents(v.digest, true) if err != nil { return "", err } - return path.Join(blobPathPrefix, "data"), nil + components = append(components, "data") + blobPathPrefix := append(rootPrefix, "blobs") + return path.Join(append(blobPathPrefix, components...)...), nil + case uploadDataPathSpec: - return path.Join(append(repoPrefix, v.name, uploadsDirectory, v.id, "data")...), nil + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil case uploadStartedAtPathSpec: - return path.Join(append(repoPrefix, v.name, uploadsDirectory, v.id, "startedat")...), nil + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil case uploadHashStatePathSpec: offset := fmt.Sprintf("%d", v.offset) if v.list { offset = "" // Limit to the prefix for listing offsets. } - return path.Join(append(repoPrefix, v.name, uploadsDirectory, v.id, "hashstates", string(v.alg), offset)...), nil + return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil case repositoriesRootPathSpec: return path.Join(repoPrefix...), nil default: @@ -310,8 +297,8 @@ type pathSpec interface { pathSpec() } -// manifestRevisionsPathSpec describes the components of the directory path for -// a root of repository revisions. +// manifestRevisionsPathSpec describes the directory path for +// a manifest revision. type manifestRevisionsPathSpec struct { name string } @@ -338,7 +325,7 @@ type manifestRevisionLinkPathSpec struct { func (manifestRevisionLinkPathSpec) pathSpec() {} -// manifestSignaturesPathSpec decribes the path components for the directory +// manifestSignaturesPathSpec describes the path components for the directory // containing all the signatures for the target blob. Entries are named with // the underlying key id. type manifestSignaturesPathSpec struct { @@ -348,7 +335,7 @@ type manifestSignaturesPathSpec struct { func (manifestSignaturesPathSpec) pathSpec() {} -// manifestSignatureLinkPathSpec decribes the path components used to look up +// manifestSignatureLinkPathSpec describes the path components used to look up // a signature file by the hash of its blob. type manifestSignatureLinkPathSpec struct { name string @@ -413,13 +400,6 @@ type manifestTagIndexEntryLinkPathSpec struct { func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} -// layersPathSpec describes the root directory of repository layer links. 
-type layersPathSpec struct { - name string -} - -func (layersPathSpec) pathSpec() {} - // blobLinkPathSpec specifies a path for a blob link, which is a file with a // blob id. The blob link will contain a content addressable blob id reference // into the blob store. The format of the contents is as follows: @@ -440,15 +420,19 @@ type layerLinkPathSpec struct { func (layerLinkPathSpec) pathSpec() {} // blobAlgorithmReplacer does some very simple path sanitization for user -// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths -// should be "safe" before getting this far due to strict digest requirements -// but we can add further path conversion here, if needed. +// input. Paths should be "safe" before getting this far due to strict digest +// requirements but we can add further path conversion here, if needed. var blobAlgorithmReplacer = strings.NewReplacer( "+", "/", ".", "/", ";", "/", ) +// blobsPathSpec contains the path for the blobs directory +type blobsPathSpec struct{} + +func (blobsPathSpec) pathSpec() {} + // blobPathSpec contains the path for the registry global blob store. type blobPathSpec struct { digest digest.Digest @@ -512,10 +496,6 @@ func (repositoriesRootPathSpec) pathSpec() {} // // / // -// Most importantly, for tarsum, the layout looks like this: -// -// tarsum/// -// // If multilevel is true, the first two bytes of the digest will separate // groups of digest folder. It will be as follows: // @@ -533,24 +513,30 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) var suffix []string if multilevel { - suffix = append(suffix, hex[:multilevelHexPrefixLength]) + suffix = append(suffix, hex[:2]) } suffix = append(suffix, hex) - if tsi, err := digest.ParseTarSum(dgst.String()); err == nil { - // We have a tarsum! 
- version := tsi.Version - if version == "" { - version = "v0" - } + return append(prefix, suffix...), nil +} - prefix = []string{ - "tarsum", - version, - tsi.Algorithm, - } +// Reconstructs a digest from a path +func digestFromPath(digestPath string) (digest.Digest, error) { + + digestPath = strings.TrimSuffix(digestPath, "/data") + dir, hex := path.Split(digestPath) + dir = path.Dir(dir) + dir, next := path.Split(dir) + + // next is either the algorithm OR the first two characters in the hex string + var algo string + if next == hex[:2] { + algo = path.Base(dir) + } else { + algo = next } - return append(prefix, suffix...), nil + dgst := digest.NewDigestFromHex(algo, hex) + return dgst, dgst.Validate() } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go index 238e2f377c83..91004bd40c36 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/paths_test.go @@ -84,25 +84,6 @@ func TestPathMapper(t *testing.T) { }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, - { - spec: layerLinkPathSpec{ - name: "foo/bar", - digest: "tarsum.v1+test:abcdef", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", - }, - { - spec: blobDataPathSpec{ - digest: digest.Digest("tarsum.dev+sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"), - }, - expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data", - }, - { - spec: blobDataPathSpec{ - digest: digest.Digest("tarsum.v1+sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"), - }, - expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/data", - }, { spec: uploadDataPathSpec{ @@ -141,3 +122,29 @@ func TestPathMapper(t *testing.T) { } } + +func TestDigestFromPath(t *testing.T) { + for _, testcase := range []struct { + path string + expected digest.Digest + multilevel bool + err error + }{ + { + path: "/docker/registry/v2/blobs/sha256/99/9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86/data", + multilevel: true, + expected: "sha256:9943fffae777400c0344c58869c4c2619c329ca3ad4df540feda74d291dd7c86", + err: nil, + }, + } { + result, err := digestFromPath(testcase.path) + if err != testcase.err { + t.Fatalf("Unexpected error value %v when we wanted %v", err, testcase.err) + } + + if result != testcase.expected { + t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) + + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go index f3f6b7f7bd2e..7576b189c779 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/purgeuploads.go @@ -72,7 +72,7 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv _, file := path.Split(filePath) if 
file[0] == '_' { // Reserved directory - inUploadDir = (file == uploadsDirectory) + inUploadDir = (file == "_uploads") if fileInfo.IsDir() && !inUploadDir { return ErrSkipDir diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go index a6316f9a88f0..3fe4ac689ba0 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/registry.go @@ -1,23 +1,26 @@ package storage import ( - "fmt" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/libtrust" ) // registry is the top-level implementation of Registry for use in the storage // package. All instances should descend from this object. type registry struct { - blobStore *blobStore - blobServer *blobServer - statter *blobStatter // global statter service. - blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider - deleteEnabled bool - resumableDigestEnabled bool + blobStore *blobStore + blobServer *blobServer + statter *blobStatter // global statter service. + blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider + deleteEnabled bool + resumableDigestEnabled bool + schema1SignaturesEnabled bool + schema1SigningKey libtrust.PrivateKey + blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory } // RegistryOption is the type used for functional options for NewRegistry. @@ -33,7 +36,6 @@ func EnableRedirect(registry *registry) error { // EnableDelete is a functional option for NewRegistry. It enables deletion on // the registry. func EnableDelete(registry *registry) error { - registry.blobStore.deleteEnabled = true registry.deleteEnabled = true return nil } @@ -45,15 +47,33 @@ func DisableDigestResumption(registry *registry) error { return nil } -// RemoveParentsOnDelete is a functional option for NewRegistry. It causes -// parent directory of blob's data or link to be deleted as well during Delete. -// It should be used only with storage drivers providing strong consistency. -// Must be used together with `EnableDelete`. -func RemoveParentsOnDelete(registry *registry) error { - registry.blobStore.removeParentsOnDelete = true +// DisableSchema1Signatures is a functional option for NewRegistry. It disables +// signature storage and ensures all schema1 manifests will only be returned +// with a signature from a provided signing key. +func DisableSchema1Signatures(registry *registry) error { + registry.schema1SignaturesEnabled = false return nil } +// Schema1SigningKey returns a functional option for NewRegistry. It sets the +// signing key for adding a signature to all schema1 manifests. This should be +// used in conjunction with disabling signature store. +func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption { + return func(registry *registry) error { + registry.schema1SigningKey = key + return nil + } +} + +// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the +// factory to create BlobDescriptorServiceFactory middleware. 
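The registry is now configured entirely through functional options; DisableSchema1Signatures and Schema1SigningKey above pair with the key generation seen in the updated manifest store tests. A sketch of composing them, assuming it sits in this storage package; newSignedSchema1Registry and the in-memory driver choice are illustrative:

func newSignedSchema1Registry(ctx context.Context) (distribution.Namespace, error) {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	return NewRegistry(ctx, inmemory.New(),
		EnableDelete,
		EnableRedirect,
		DisableSchema1Signatures, // stop persisting schema1 signatures
		Schema1SigningKey(key),   // re-sign schema1 manifests when serving them
	)
}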
+func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption { + return func(registry *registry) error { + registry.blobDescriptorServiceFactory = factory + return nil + } +} + // BlobDescriptorCacheProvider returns a functional option for // NewRegistry. It creates a cached blob statter for use by the // registry. @@ -96,8 +116,9 @@ func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, option statter: statter, pathFn: bs.path, }, - statter: statter, - resumableDigestEnabled: true, + statter: statter, + resumableDigestEnabled: true, + schema1SignaturesEnabled: true, } for _, option := range options { @@ -118,18 +139,11 @@ func (reg *registry) Scope() distribution.Scope { // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, canonicalName string) (distribution.Repository, error) { - if _, err := reference.ParseNamed(canonicalName); err != nil { - return nil, distribution.ErrRepositoryNameInvalid{ - Name: canonicalName, - Reason: err, - } - } - +func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { var descriptorCache distribution.BlobDescriptorService if reg.blobDescriptorCacheProvider != nil { var err error - descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName) + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) if err != nil { return nil, err } @@ -143,42 +157,36 @@ func (reg *registry) Repository(ctx context.Context, canonicalName string) (dist }, nil } -// Blobs returns an instance of the BlobServer for registry's blob access. -func (reg *registry) Blobs() distribution.BlobService { +func (reg *registry) Blobs() distribution.BlobEnumerator { return reg.blobStore } -// RegistryBlobEnumerator returns an instance of BlobEnumerator for given registry object. -func RegistryBlobEnumerator(ns distribution.Namespace) (distribution.BlobEnumerator, error) { - reg, ok := ns.(*registry) - if !ok { - return nil, fmt.Errorf("cannot instantiate BlobEnumerator with given namespace object (%T)", ns) - } - return reg.blobStore, nil -} - -// RegistryBlobDeleter returns an instance of BlobDeleter for given registry object. -func RegistryBlobDeleter(ns distribution.Namespace) (distribution.BlobDeleter, error) { - reg, ok := ns.(*registry) - if !ok { - return nil, fmt.Errorf("cannot instantiate BlobDeleter with given namespace object (%T)", ns) - } - return reg.blobStore, nil +func (reg *registry) BlobStatter() distribution.BlobStatter { + return reg.statter } // repository provides name-scoped access to various services. type repository struct { *registry ctx context.Context - name string + name reference.Named descriptorCache distribution.BlobDescriptorService } // Name returns the name of the repository. -func (repo *repository) Name() string { +func (repo *repository) Named() reference.Named { return repo.name } +func (repo *repository) Tags(ctx context.Context) distribution.TagService { + tags := &tagStore{ + repository: repo, + blobStore: repo.registry.blobStore, + } + + return tags +} + // Manifests returns an instance of ManifestService. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. 
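Repository now takes a reference.Named instead of a string, and repositories expose Named(), Tags(ctx), Manifests(ctx), and Blobs(ctx). A short sketch of the new lookup pattern (openRepository is illustrative only):

func openRepository(ctx context.Context, ns distribution.Namespace, name string) (distribution.Repository, error) {
	named, err := reference.ParseNamed(name) // e.g. "foo/bar"
	if err != nil {
		return nil, err
	}
	repo, err := ns.Repository(ctx, named)
	if err != nil {
		return nil, err
	}
	context.GetLogger(ctx).Debugf("opened repository %s", repo.Named().Name())
	return repo, nil
}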
@@ -189,46 +197,61 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M manifestRevisionLinkPath, blobLinkPath, } - manifestRootPathFns := []blobsRootPathFunc{ - manifestRevisionsPath, - blobsRootPath, + + manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()} + + var statter distribution.BlobDescriptorService = &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + } + + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + + blobStore := &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: statter, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. + linkPathFns: manifestLinkPathFns, + linkDirectoryPathSpec: manifestDirectoryPathSpec, } ms := &manifestStore{ ctx: ctx, repository: repo, - revisionStore: &revisionStore{ + blobStore: blobStore, + schema1Handler: &signedManifestHandler{ ctx: ctx, repository: repo, - blobStore: &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - deleteEnabled: repo.registry.deleteEnabled, - blobAccessController: &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: manifestLinkPathFns, - removeParentsOnDelete: repo.registry.blobStore.removeParentsOnDelete, - }, - - // TODO(stevvooe): linkPath limits this blob store to only - // manifests. This instance cannot be used for blob checks. - linkPathFns: manifestLinkPathFns, - blobsRootPathFns: manifestRootPathFns, - resumableDigestEnabled: repo.resumableDigestEnabled, + blobStore: blobStore, + signatures: &signatureStore{ + ctx: ctx, + repository: repo, + blobStore: repo.blobStore, }, }, - tagStore: &tagStore{ + schema2Handler: &schema2ManifestHandler{ ctx: ctx, repository: repo, - blobStore: repo.registry.blobStore, + blobStore: blobStore, + }, + manifestListHandler: &manifestListHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, }, } // Apply options for _, option := range options { - err := option(ms) + err := option.Apply(ms) if err != nil { return nil, err } @@ -242,17 +265,21 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M // to a request local. func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { var statter distribution.BlobDescriptorService = &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: []linkPathFunc{blobLinkPath}, - removeParentsOnDelete: repo.registry.blobStore.removeParentsOnDelete, + blobStore: repo.blobStore, + repository: repo, + linkPathFns: []linkPathFunc{blobLinkPath}, } if repo.descriptorCache != nil { statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) } + if repo.registry.blobDescriptorServiceFactory != nil { + statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) + } + return &linkedBlobStore{ + registry: repo.registry, blobStore: repo.blobStore, blobServer: repo.blobServer, blobAccessController: statter, @@ -262,16 +289,7 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { // TODO(stevvooe): linkPath limits this blob store to only layers. // This instance cannot be used for manifest checks. 
linkPathFns: []linkPathFunc{blobLinkPath}, - blobsRootPathFns: []blobsRootPathFunc{blobsRootPath}, deleteEnabled: repo.registry.deleteEnabled, resumableDigestEnabled: repo.resumableDigestEnabled, } } - -func (repo *repository) Signatures() distribution.SignatureService { - return &signatureStore{ - repository: repo, - blobStore: repo.blobStore, - ctx: repo.ctx, - } -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go deleted file mode 100644 index 421c5dcecf0d..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/revisionstore.go +++ /dev/null @@ -1,116 +0,0 @@ -package storage - -import ( - "encoding/json" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/libtrust" -) - -// revisionStore supports storing and managing manifest revisions. -type revisionStore struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -// get retrieves the manifest, keyed by revision digest. -func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*schema1.SignedManifest, error) { - // Ensure that this revision is available in this repository. - _, err := rs.blobStore.Stat(ctx, revision) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: rs.repository.Name(), - Revision: revision, - } - } - - return nil, err - } - - // TODO(stevvooe): Need to check descriptor from above to ensure that the - // mediatype is as we expect for the manifest store. - - content, err := rs.blobStore.Get(ctx, revision) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: rs.repository.Name(), - Revision: revision, - } - } - - return nil, err - } - - // Fetch the signatures for the manifest - signatures, err := rs.repository.Signatures().Get(revision) - if err != nil { - return nil, err - } - - jsig, err := libtrust.NewJSONSignature(content, signatures...) - if err != nil { - return nil, err - } - - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - - return &sm, nil -} - -// put stores the manifest in the repository, if not already present. Any -// updated signatures will be stored, as well. -func (rs *revisionStore) put(ctx context.Context, sm *schema1.SignedManifest) (distribution.Descriptor, error) { - // Resolve the payload in the manifest. - payload, err := sm.Payload() - if err != nil { - return distribution.Descriptor{}, err - } - - // Digest and store the manifest payload in the blob store. - revision, err := rs.blobStore.Put(ctx, schema1.ManifestMediaType, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return distribution.Descriptor{}, err - } - - // Link the revision into the repository. - if err := rs.blobStore.linkBlob(ctx, revision); err != nil { - return distribution.Descriptor{}, err - } - - // Grab each json signature and store them. 
- signatures, err := sm.Signatures() - if err != nil { - return distribution.Descriptor{}, err - } - - if err := rs.repository.Signatures().Put(revision.Digest, signatures...); err != nil { - return distribution.Descriptor{}, err - } - - return revision, nil -} - -// enumerate returns an array of digests of all found manifest revisions. -func (rs *revisionStore) enumerate() ([]digest.Digest, error) { - return enumerateAllBlobs(rs.blobStore, rs.ctx) -} - -func (rs *revisionStore) delete(ctx context.Context, revision digest.Digest) error { - return rs.blobStore.Delete(ctx, revision) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go new file mode 100644 index 000000000000..6456efa4e143 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler.go @@ -0,0 +1,128 @@ +package storage + +import ( + "errors" + "fmt" + "net/url" + + "encoding/json" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" +) + +var ( + errUnexpectedURL = errors.New("unexpected URL on layer") + errMissingURL = errors.New("missing URL on layer") + errInvalidURL = errors.New("invalid URL on layer") +) + +//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. +type schema2ManifestHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +var _ ManifestHandler = &schema2ManifestHandler{} + +func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + + var m schema2.DeserializedManifest + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + + m, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to store +// valid content, leaving trust policies of that content up to consumers. 
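Editor's note: before the verifyManifest implementation that follows, a minimal sketch of the layer-URL rules it enforces, modeled on the schema2manifesthandler_test.go cases later in this diff: a foreign layer must carry at least one http/https URL with no fragment, and an ordinary layer must not carry URLs at all. The package name, helper name, placeholder digest and size, and the pre-existing config descriptor are assumptions for illustration:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/manifest/schema2"
)

// putForeignLayerManifest stores a schema2 manifest with one foreign layer.
// An empty URL list would fail with errMissingURL, a file:// or fragment URL
// with errInvalidURL, and a URL on a non-foreign layer with errUnexpectedURL.
func putForeignLayerManifest(ctx context.Context, ms distribution.ManifestService, config distribution.Descriptor) error {
	foreign := distribution.Descriptor{
		Digest:    "sha256:0000000000000000000000000000000000000000000000000000000000000000",
		Size:      1234,
		MediaType: schema2.MediaTypeForeignLayer,
		URLs:      []string{"https://example.com/layer.tar.gz"}, // http/https only, no fragment
	}

	m := schema2.Manifest{
		Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest},
		Config:    config, // must already exist as a blob in the repository
		Layers:    []distribution.Descriptor{foreign},
	}

	dm, err := schema2.FromStruct(m)
	if err != nil {
		return err
	}

	_, err = ms.Put(ctx, dm)
	return err
}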
+func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + target := mnfst.Target() + _, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest}) + } + + for _, fsLayer := range mnfst.References() { + var err error + if fsLayer.MediaType != schema2.MediaTypeForeignLayer { + if len(fsLayer.URLs) == 0 { + _, err = ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + } else { + err = errUnexpectedURL + } + } else { + // Clients download this layer from an external URL, so do not check for + // its presense. + if len(fsLayer.URLs) == 0 { + err = errMissingURL + } + for _, u := range fsLayer.URLs { + var pu *url.URL + pu, err = url.Parse(u) + if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" { + err = errInvalidURL + } + } + } + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go new file mode 100644 index 000000000000..c2f61edf4a1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/schema2manifesthandler_test.go @@ -0,0 +1,117 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestVerifyManifestForeignLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "test") + manifestService := makeManifestService(t, repo) + + config, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeConfig, nil) + if err != nil { + t.Fatal(err) + } + + layer, err := repo.Blobs(ctx).Put(ctx, schema2.MediaTypeLayer, nil) + if err != nil { + t.Fatal(err) + } + + foreignLayer := distribution.Descriptor{ + Digest: "sha256:463435349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: schema2.MediaTypeForeignLayer, + } + + template := schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: config, + } + + type testcase struct { + BaseLayer distribution.Descriptor + URLs []string + Err error + } + + cases := []testcase{ + { + foreignLayer, + nil, + errMissingURL, + }, + { + layer, + []string{"http://foo/bar"}, + errUnexpectedURL, + }, + { + foreignLayer, + []string{"file:///local/file"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar#baz"}, + errInvalidURL, + }, + { + foreignLayer, + []string{""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"https://foo/bar", ""}, + errInvalidURL, + }, + { + foreignLayer, + 
[]string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"https://foo/bar"}, + nil, + }, + } + + for _, c := range cases { + m := template + l := c.BaseLayer + l.URLs = c.URLs + m.Layers = []distribution.Descriptor{l} + dm, err := schema2.FromStruct(m) + if err != nil { + t.Error(err) + continue + } + + _, err = manifestService.Put(ctx, dm) + if verr, ok := err.(distribution.ErrManifestVerification); ok { + // Extract the first error + if len(verr) == 2 { + if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok { + err = verr[0] + } + } + } + if err != c.Err { + t.Errorf("%#v: expected %v, got %v", l, c.Err, err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go index cbeedd36950a..2940e0415236 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signaturestore.go @@ -4,7 +4,6 @@ import ( "path" "sync" - "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) @@ -15,19 +14,9 @@ type signatureStore struct { ctx context.Context } -func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobStore) *signatureStore { - return &signatureStore{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - } -} - -var _ distribution.SignatureService = &signatureStore{} - func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { signaturesPath, err := pathFor(manifestSignaturesPathSpec{ - name: s.repository.Name(), + name: s.repository.Named().Name(), revision: dgst, }) @@ -105,11 +94,6 @@ loop: return signatures, err } -// Enumerate returns an array of digests of manifest signatures. -func (s *signatureStore) Enumerate(manifestReference digest.Digest) ([]digest.Digest, error) { - return enumerateAllBlobs(s.linkedBlobStore(s.ctx, manifestReference), s.ctx) -} - func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { bs := s.linkedBlobStore(s.ctx, dgst) for _, signature := range signatures { @@ -120,21 +104,6 @@ func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { return nil } -// Delete removes all signature links of given manifest revision. -func (s *signatureStore) Delete(revision digest.Digest) error { - dgsts, err := s.Enumerate(revision) - if err != nil { - return err - } - lbs := s.linkedBlobStore(s.ctx, revision) - for _, dgst := range dgsts { - if err = lbs.Delete(s.ctx, dgst); err != nil { - return err - } - } - return nil -} - // linkedBlobStore returns the namedBlobStore of the signatures for the // manifest with the given digest. Effectively, each signature link path // layout is a unique linked blob store. 
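Editor's note: for context on what these signature blobs are used for, the signedManifestHandler added later in this diff recombines the canonical schema1 payload with the detached JWS signatures kept by signatureStore. A minimal sketch of that round trip using the same libtrust and schema1 calls that appear in this diff; the helper name and package are hypothetical:

package example

import (
	"encoding/json"

	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

// rebuildSignedManifest reattaches stored detached signatures to a canonical
// schema1 payload and verifies the result.
func rebuildSignedManifest(canonical []byte, signatures [][]byte) (*schema1.SignedManifest, error) {
	jsig, err := libtrust.NewJSONSignature(canonical, signatures...)
	if err != nil {
		return nil, err
	}

	// Emit the "pretty" JWS form used on the wire.
	raw, err := jsig.PrettySignature("signatures")
	if err != nil {
		return nil, err
	}

	var sm schema1.SignedManifest
	if err := json.Unmarshal(raw, &sm); err != nil {
		return nil, err
	}

	// schema1.Verify checks the signatures against the enclosed payload.
	if _, err := schema1.Verify(&sm); err != nil {
		return nil, err
	}
	return &sm, nil
}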
@@ -145,12 +114,7 @@ func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Di revision: revision, signature: dgst, }) - } - linkRootPath := func(name string) (string, error) { - return pathFor(manifestSignaturesPathSpec{ - name: name, - revision: revision, - }) + } return &linkedBlobStore{ @@ -158,12 +122,10 @@ func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Di repository: s.repository, blobStore: s.blobStore, blobAccessController: &linkedBlobStatter{ - blobStore: s.blobStore, - repository: s.repository, - linkPathFns: []linkPathFunc{linkpath}, - removeParentsOnDelete: true, + blobStore: s.blobStore, + repository: s.repository, + linkPathFns: []linkPathFunc{linkpath}, }, - linkPathFns: []linkPathFunc{linkpath}, - blobsRootPathFns: []blobsRootPathFunc{linkRootPath}, + linkPathFns: []linkPathFunc{linkpath}, } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go new file mode 100644 index 000000000000..8e13dd932b9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/signedmanifesthandler.go @@ -0,0 +1,167 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It +// can unmarshal and put schema1 manifests that have been signed by libtrust. +type signedManifestHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context + signatures *signatureStore +} + +var _ ManifestHandler = &signedManifestHandler{} + +func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + + var ( + signatures [][]byte + err error + ) + if ms.repository.schema1SignaturesEnabled { + // Fetch the signatures for the manifest + signatures, err = ms.signatures.Get(dgst) + if err != nil { + return nil, err + } + } + + jsig, err := libtrust.NewJSONSignature(content, signatures...) 
+ if err != nil { + return nil, err + } + + if ms.repository.schema1SigningKey != nil { + if err := jsig.Sign(ms.repository.schema1SigningKey); err != nil { + return nil, err + } + } else if !ms.repository.schema1SignaturesEnabled { + return nil, fmt.Errorf("missing signing key with signature store disabled") + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm schema1.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + return &sm, nil +} + +func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + + sm, ok := manifest.(*schema1.SignedManifest) + if !ok { + return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { + return "", err + } + + mt := schema1.MediaTypeManifest + payload := sm.Canonical + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + if ms.repository.schema1SignaturesEnabled { + // Grab each json signature and store them. + signatures, err := sm.Signatures() + if err != nil { + return "", err + } + + if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { + return "", err + } + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumers. 
+func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) + } + + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) + } + + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + + if _, err := schema1.Verify(&mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !skipDependencyVerification { + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go index aec95286048e..4386ffcac9f3 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore.go @@ -9,37 +9,41 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" ) +var _ distribution.TagService = &tagStore{} + // tagStore provides methods to manage manifest tags in a backend storage driver. +// This implementation uses the same on-disk layout as the (now deleted) tag +// store. This provides backward compatibility with current registry deployments +// which only makes use of the Digest field of the returned distribution.Descriptor +// but does not enable full roundtripping of Descriptor objects type tagStore struct { repository *repository blobStore *blobStore - ctx context.Context } -// tags lists the manifest tags for the specified repository. 
-func (ts *tagStore) tags() ([]string, error) { - p, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name(), - }) +// All returns all tags +func (ts *tagStore) All(ctx context.Context) ([]string, error) { + var tags []string + pathSpec, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Named().Name(), + }) if err != nil { - return nil, err + return tags, err } - var tags []string - entries, err := ts.blobStore.driver.List(ts.ctx, p) + entries, err := ts.blobStore.driver.List(ctx, pathSpec) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return nil, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()} + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()} default: - return nil, err + return tags, err } } for _, entry := range entries { _, filename := path.Split(entry) - tags = append(tags, filename) } @@ -47,9 +51,9 @@ func (ts *tagStore) tags() ([]string, error) { } // exists returns true if the specified manifest tag exists in the repository. -func (ts *tagStore) exists(tag string) (bool, error) { +func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { tagPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Named().Name(), tag: tag, }) @@ -57,7 +61,7 @@ func (ts *tagStore) exists(tag string) (bool, error) { return false, err } - exists, err := exists(ts.ctx, ts.blobStore.driver, tagPath) + exists, err := exists(ctx, ts.blobStore.driver, tagPath) if err != nil { return false, err } @@ -65,11 +69,11 @@ func (ts *tagStore) exists(tag string) (bool, error) { return exists, nil } -// tag tags the digest with the given tag, updating the the store to point at +// Tag tags the digest with the given tag, updating the the store to point at // the current tag. The digest must point to a manifest. -func (ts *tagStore) tag(tag string, revision digest.Digest) error { +func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Named().Name(), tag: tag, }) @@ -77,53 +81,58 @@ func (ts *tagStore) tag(tag string, revision digest.Digest) error { return err } - nbs := ts.linkedBlobStore(ts.ctx, tag) + lbs := ts.linkedBlobStore(ctx, tag) + // Link into the index - if err := nbs.linkBlob(ts.ctx, distribution.Descriptor{Digest: revision}); err != nil { + if err := lbs.linkBlob(ctx, desc); err != nil { return err } // Overwrite the current link - return ts.blobStore.link(ts.ctx, currentPath, revision) + return ts.blobStore.link(ctx, currentPath, desc.Digest) } // resolve the current revision for name and tag. 
-func (ts *tagStore) resolve(tag string) (digest.Digest, error) { +func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Named().Name(), tag: tag, }) if err != nil { - return "", err + return distribution.Descriptor{}, err } - revision, err := ts.blobStore.readlink(ts.ctx, currentPath) + revision, err := ts.blobStore.readlink(ctx, currentPath) if err != nil { switch err.(type) { case storagedriver.PathNotFoundError: - return "", distribution.ErrManifestUnknown{Name: ts.repository.Name(), Tag: tag} + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} } - return "", err + return distribution.Descriptor{}, err } - return revision, nil + return distribution.Descriptor{Digest: revision}, nil } -// delete removes the tag from repository, including the history of all -// revisions that have the specified tag. -func (ts *tagStore) delete(tag string) error { +// Untag removes the tag association +func (ts *tagStore) Untag(ctx context.Context, tag string) error { tagPath, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Named().Name(), tag: tag, }) - if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.ErrTagUnknown{Tag: tag} + case nil: + break + default: return err } - return ts.blobStore.driver.Delete(ts.ctx, tagPath) + return ts.blobStore.driver.Delete(ctx, tagPath) } // linkedBlobStore returns the linkedBlobStore for the named tag, allowing one @@ -145,3 +154,38 @@ func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlob }}, } } + +// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by +// digest, tag entries which point to it need to be recovered to avoid dangling tags. 
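Editor's note: the Lookup implementation follows below. As a usage sketch (the helper name is hypothetical), the TagService interface added in tags.go later in this diff lets a caller clear every tag that still references a digest, for example when a manifest is deleted by digest:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// untagDigest removes every tag association that still points at desc so no
// dangling tags remain after the manifest itself is deleted.
func untagDigest(ctx context.Context, tags distribution.TagService, desc distribution.Descriptor) error {
	referencing, err := tags.Lookup(ctx, desc)
	if err != nil {
		return err
	}
	for _, tag := range referencing {
		if err := tags.Untag(ctx, tag); err != nil {
			return err
		}
	}
	return nil
}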
+func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + allTags, err := ts.All(ctx) + switch err.(type) { + case distribution.ErrRepositoryUnknown: + // This tag store has been initialized but not yet populated + break + case nil: + break + default: + return nil, err + } + + var tags []string + for _, tag := range allTags { + tagLinkPathSpec := manifestTagCurrentPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + } + + tagLinkPath, err := pathFor(tagLinkPathSpec) + tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) + if err != nil { + return nil, err + } + + if tagDigest == desc.Digest { + tags = append(tags, tag) + } + } + + return tags, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore_test.go new file mode 100644 index 000000000000..554a46bf77be --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/tagstore_test.go @@ -0,0 +1,209 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type tagsTestEnv struct { + ts distribution.TagService + ctx context.Context +} + +func testTagStore(t *testing.T) *tagsTestEnv { + ctx := context.Background() + d := inmemory.New() + reg, err := NewRegistry(ctx, d) + if err != nil { + t.Fatal(err) + } + + repoRef, _ := reference.ParseNamed("a/b") + repo, err := reg.Repository(ctx, repoRef) + if err != nil { + t.Fatal(err) + } + + return &tagsTestEnv{ + ctx: ctx, + ts: repo.Tags(ctx), + } +} + +func TestTagStoreTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + + d := distribution.Descriptor{} + err := tags.Tag(ctx, "latest", d) + if err == nil { + t.Errorf("unexpected error putting malformed descriptor : %s", err) + } + + d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err := tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } + + // Overwrite existing + d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err = tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } +} + +func TestTagStoreUnTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} + + err := tags.Untag(ctx, "latest") + if err == nil { + t.Errorf("Expected error untagging non-existant tag") + } + + err = tags.Tag(ctx, "latest", desc) + if err != nil { + t.Error(err) + } + + err = tags.Untag(ctx, "latest") + if err != nil { + t.Error(err) + } + + errExpect := distribution.ErrTagUnknown{Tag: "latest"}.Error() + _, err = tags.Get(ctx, "latest") + if err == nil || err.Error() != errExpect { + t.Error("Expected error getting untagged tag") + } +} + +func TestTagStoreAll(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + alpha := "abcdefghijklmnopqrstuvwxyz" + for i := 0; i < len(alpha); i++ { + tag 
:= alpha[i] + desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"} + err := tagStore.Tag(ctx, string(tag), desc) + if err != nil { + t.Error(err) + } + } + + all, err := tagStore.All(ctx) + if err != nil { + t.Error(err) + } + if len(all) != len(alpha) { + t.Errorf("Unexpected count returned from enumerate") + } + + for i, c := range all { + if c != string(alpha[i]) { + t.Errorf("unexpected tag in enumerate %s", c) + } + } + + removed := "a" + err = tagStore.Untag(ctx, removed) + if err != nil { + t.Error(err) + } + + all, err = tagStore.All(ctx) + if err != nil { + t.Error(err) + } + for _, tag := range all { + if tag == removed { + t.Errorf("unexpected tag in enumerate %s", removed) + } + } + +} + +func TestTagLookup(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} + desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"} + + tags, err := tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + if len(tags) != 0 { + t.Fatalf("Lookup returned > 0 tags from empty store") + } + + err = tagStore.Tag(ctx, "a", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "b", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "0", desc0) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "1", desc0) + if err != nil { + t.Fatal(err) + } + + tags, err = tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags)) + } + + tags, err = tagStore.Lookup(ctx, desc0) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descB returned %d tags, expected 2", len(tags)) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go index 60d5a2fae1d8..3bdfebf2774c 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/vacuum.go @@ -34,11 +34,13 @@ func (v Vacuum) RemoveBlob(dgst string) error { return err } - blobPath, err := pathFor(blobDataPathSpec{digest: d}) + blobPath, err := pathFor(blobPathSpec{digest: d}) if err != nil { return err } + context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + err = v.driver.Delete(v.ctx, blobPath) if err != nil { return err diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go index e6d9a89bc3ec..d979796ebd6f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk.go @@ -3,62 +3,31 @@ package storage import ( "errors" "fmt" - "regexp" "sort" - "strings" - "github.com/docker/distribution" "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" storageDriver "github.com/docker/distribution/registry/storage/driver" ) -var ( - reTarsumPrefix = regexp.MustCompile(`^tarsum(?:/(\w+))?`) - reDigestPath = regexp.MustCompile(fmt.Sprintf(`^([^/]+)/(?:\w{%d}/)?(\w+)$`, multilevelHexPrefixLength)) -) - // ErrSkipDir is used as a return 
value from onFileFunc to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. var ErrSkipDir = errors.New("skip this directory") -// ErrFinishedWalk is used when the called walk function no longer wants -// to accept any more values. This is used for pagination when the -// required number of items have been found. -var ErrFinishedWalk = errors.New("finished walk") - // WalkFn is called once per file by Walk // If the returned error is ErrSkipDir and fileInfo refers // to a directory, the directory will not be entered and Walk // will continue the traversal. Otherwise Walk will return type WalkFn func(fileInfo storageDriver.FileInfo) error -// WalkChildrenFilter transforms a list of directory children during a -// walk before before it's recursively traversed. -type WalkChildrenFilter func([]string) []string - -// walkChildrenSortedFilter causes Walk to process entries in a lexicographical -// order. -func walkChildrenSortedFilter(children []string) []string { - sort.Stable(sort.StringSlice(children)) - return children -} - -// walkChildrenNoFilter is an identity filter for directory children. -func walkChildrenNoFilter(children []string) []string { - return children -} - -// WalkWithChildrenFilter traverses a filesystem defined within driver, -// starting from the given path, calling f on each file. Given filter will be -// called on a list of directory children before being recursively processed. -func WalkWithChildrenFilter(ctx context.Context, driver storageDriver.StorageDriver, from string, filter WalkChildrenFilter, f WalkFn) error { +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { children, err := driver.List(ctx, from) if err != nil { return err } - filter(children) + sort.Stable(sort.StringSlice(children)) for _, child := range children { // TODO(stevvooe): Calling driver.Stat for every entry is quite // expensive when running against backends with a slow Stat @@ -75,7 +44,7 @@ func WalkWithChildrenFilter(ctx context.Context, driver storageDriver.StorageDri } if fileInfo.IsDir() && !skipDir { - if err := WalkWithChildrenFilter(ctx, driver, child, filter, f); err != nil { + if err := Walk(ctx, driver, child, f); err != nil { return err } } @@ -83,109 +52,8 @@ func WalkWithChildrenFilter(ctx context.Context, driver storageDriver.StorageDri return nil } -// Walk traverses a filesystem defined within driver, starting -// from the given path, calling f on each file. -func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { - return WalkWithChildrenFilter(ctx, driver, from, walkChildrenNoFilter, f) -} - -// WalkSortedChildren traverses a filesystem defined within driver, starting -// from the given path, calling f on each file in lexicographical order. -func WalkSortedChildren(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { - return WalkWithChildrenFilter(ctx, driver, from, walkChildrenSortedFilter, f) -} - // pushError formats an error type given a path and an error // and pushes it to a slice of errors func pushError(errors []error, path string, err error) []error { return append(errors, fmt.Errorf("%s: %s", path, err)) } - -// makeBlobStoreWalkFunc returns a function for walking a blob store at -// particular rootPath. 
The returned function calls a given ingest callback on -// each digest found. The blob store is expected to have following layout: -// -// if multilevel is true: -// /// -// /tarsum//// -// otherwise: -// // -// /tarsum/// -func makeBlobStoreWalkFunc(rootPath string, multilevel bool, ingest func(digest.Digest) error) (WalkFn, error) { - var ( - // number of slashes in a path to a full digest directory under a rootPath - blobRefPathSepCount int - blobTarsumRefPathSepCount int - ) - - if multilevel { - // // - blobRefPathSepCount = 2 - // tarsum//// - blobTarsumRefPathSepCount = 4 - } else { - // / - blobRefPathSepCount = 1 - // tarsum/// - blobTarsumRefPathSepCount = 3 - } - - return func(fi storageDriver.FileInfo) error { - if !fi.IsDir() { - // ignore files - return nil - } - - // trim / prefix - pth := strings.TrimPrefix(strings.TrimPrefix(fi.Path(), rootPath), "/") - sepCount := strings.Count(pth, "/") - - if sepCount < blobRefPathSepCount { - // don't bother finding digests in a too short path - return nil - } - - alg := "" - tarsumParts := reTarsumPrefix.FindStringSubmatch(pth) - isTarsum := len(tarsumParts) > 0 - if sepCount > blobTarsumRefPathSepCount || (!isTarsum && sepCount > blobRefPathSepCount) { - // too many path components - return ErrSkipDir - } - - if len(tarsumParts) > 0 { - alg = "tarsum." + tarsumParts[1] + "+" - // trim "tarsum//" prefix from path - pth = strings.TrimPrefix(pth[len(tarsumParts[0]):], "/") - } - - digestParts := reDigestPath.FindStringSubmatch(pth) - if len(digestParts) > 0 { - alg += digestParts[1] - dgstHex := digestParts[2] - dgst := digest.NewDigestFromHex(alg, dgstHex) - // append only valid digests - if err := dgst.Validate(); err == nil { - err := ingest(dgst) - if err != nil { - return ErrFinishedWalk - } - } - return ErrSkipDir - } - - return nil - }, nil -} - -// enumerateAllBlobs is a utility function that returns all the blob digests -// found in given blob store. It should be used with care because of memory and -// time complexity. 
-func enumerateAllBlobs(be distribution.BlobEnumerator, ctx context.Context) ([]digest.Digest, error) { - res := []digest.Digest{} - err := be.Enumerate(ctx, func(dgst digest.Digest) error { - res = append(res, dgst) - return nil - }) - return res, err -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go index 42a119d6156c..3d7a4b1b631a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/storage/walk_test.go @@ -73,7 +73,7 @@ func TestWalkErrors(t *testing.T) { } } - err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error { + err = Walk(ctx, d, "/nonexistent", func(fileInfo driver.FileInfo) error { return nil }) if err == nil { @@ -116,47 +116,6 @@ func TestWalk(t *testing.T) { t.Errorf("Missed files in walk: %q", expected) } - if err != nil { - t.Fatalf(err.Error()) - } -} - -func TestWalkSortedChildren(t *testing.T) { - d, expected, ctx := testFS(t) - var traversed []string - - err := WalkSortedChildren(ctx, d, "/", func(fileInfo driver.FileInfo) error { - filePath := fileInfo.Path() - filetype, ok := expected[filePath] - - if !ok { - t.Fatalf("Unexpected file in walk: %q", filePath) - } - - if fileInfo.IsDir() { - if filetype != "dir" { - t.Errorf("Unexpected file type: %q", filePath) - } - } else { - if filetype != "file" { - t.Errorf("Unexpected file type: %q", filePath) - } - - // each file has its own path as the contents. If the length - // doesn't match the path length, fail. - if fileInfo.Size() != int64(len(fileInfo.Path())) { - t.Fatalf("unexpected size for %q: %v != %v", - fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path())) - } - } - delete(expected, filePath) - traversed = append(traversed, filePath) - return nil - }) - if len(expected) > 0 { - t.Errorf("Missed files in walk: %q", expected) - } - if !sort.StringsAreSorted(traversed) { t.Errorf("result should be sorted: %v", traversed) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/tags.go b/Godeps/_workspace/src/github.com/docker/distribution/tags.go new file mode 100644 index 000000000000..503056596371 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "github.com/docker/distribution/context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. 
+ Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go b/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go deleted file mode 100644 index 00cd8a6ac291..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/testutil/handler.go +++ /dev/null @@ -1,148 +0,0 @@ -package testutil - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strings" -) - -// RequestResponseMap is an ordered mapping from Requests to Responses -type RequestResponseMap []RequestResponseMapping - -// RequestResponseMapping defines a Response to be sent in response to a given -// Request -type RequestResponseMapping struct { - Request Request - Response Response -} - -// Request is a simplified http.Request object -type Request struct { - // Method is the http method of the request, for example GET - Method string - - // Route is the http route of this request - Route string - - // QueryParams are the query parameters of this request - QueryParams map[string][]string - - // Body is the byte contents of the http request - Body []byte - - // Headers are the header for this request - Headers http.Header -} - -func (r Request) String() string { - queryString := "" - if len(r.QueryParams) > 0 { - keys := make([]string, 0, len(r.QueryParams)) - queryParts := make([]string, 0, len(r.QueryParams)) - for k := range r.QueryParams { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - for _, val := range r.QueryParams[k] { - queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) - } - } - queryString = "?" + strings.Join(queryParts, "&") - } - var headers []string - if len(r.Headers) > 0 { - var headerKeys []string - for k := range r.Headers { - headerKeys = append(headerKeys, k) - } - sort.Strings(headerKeys) - - for _, k := range headerKeys { - for _, val := range r.Headers[k] { - headers = append(headers, fmt.Sprintf("%s:%s", k, val)) - } - } - - } - return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) -} - -// Response is a simplified http.Response object -type Response struct { - // Statuscode is the http status code of the Response - StatusCode int - - // Headers are the http headers of this Response - Headers http.Header - - // Body is the response body - Body []byte -} - -// testHandler is an http.Handler with a defined mapping from Request to an -// ordered list of Response objects -type testHandler struct { - responseMap map[string][]Response -} - -// NewHandler returns a new test handler that responds to defined requests -// with specified responses -// Each time a Request is received, the next Response is returned in the -// mapping, until no Responses are defined, at which point a 404 is sent back -func NewHandler(requestResponseMap RequestResponseMap) http.Handler { - responseMap := make(map[string][]Response) - for _, mapping := range requestResponseMap { - responses, ok := responseMap[mapping.Request.String()] - if ok { - responseMap[mapping.Request.String()] = append(responses, mapping.Response) - } else { - responseMap[mapping.Request.String()] = []Response{mapping.Response} - } - } - return &testHandler{responseMap: responseMap} -} - -func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - requestBody, _ := ioutil.ReadAll(r.Body) - request := Request{ - Method: r.Method, - Route: r.URL.Path, - QueryParams: 
r.URL.Query(), - Body: requestBody, - Headers: make(map[string][]string), - } - - // Add headers of interest here - for k, v := range r.Header { - if k == "If-None-Match" { - request.Headers[k] = v - } - } - - responses, ok := app.responseMap[request.String()] - - if !ok || len(responses) == 0 { - http.NotFound(w, r) - return - } - - response := responses[0] - app.responseMap[request.String()] = responses[1:] - - responseHeader := w.Header() - for k, v := range response.Headers { - responseHeader[k] = v - } - - w.WriteHeader(response.StatusCode) - - io.Copy(w, bytes.NewReader(response.Body)) -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go b/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go deleted file mode 100644 index 08b796f5e96d..000000000000 --- a/Godeps/_workspace/src/github.com/docker/distribution/testutil/tarfile.go +++ /dev/null @@ -1,95 +0,0 @@ -package testutil - -import ( - "archive/tar" - "bytes" - "crypto/rand" - "fmt" - "io" - "io/ioutil" - mrand "math/rand" - "time" - - "github.com/docker/docker/pkg/tarsum" -) - -// CreateRandomTarFile creates a random tarfile, returning it as an -// io.ReadSeeker along with its tarsum. An error is returned if there is a -// problem generating valid content. -func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) { - nFiles := mrand.Intn(10) + 10 - target := &bytes.Buffer{} - wr := tar.NewWriter(target) - - // Perturb this on each iteration of the loop below. - header := &tar.Header{ - Mode: 0644, - ModTime: time.Now(), - Typeflag: tar.TypeReg, - Uname: "randocalrissian", - Gname: "cloudcity", - AccessTime: time.Now(), - ChangeTime: time.Now(), - } - - for fileNumber := 0; fileNumber < nFiles; fileNumber++ { - fileSize := mrand.Int63n(1<<20) + 1<<20 - - header.Name = fmt.Sprint(fileNumber) - header.Size = fileSize - - if err := wr.WriteHeader(header); err != nil { - return nil, "", err - } - - randomData := make([]byte, fileSize) - - // Fill up the buffer with some random data. - n, err := rand.Read(randomData) - - if n != len(randomData) { - return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) - } - - if err != nil { - return nil, "", err - } - - nn, err := io.Copy(wr, bytes.NewReader(randomData)) - if nn != fileSize { - return nil, "", fmt.Errorf("short copy writing random file to tar") - } - - if err != nil { - return nil, "", err - } - - if err := wr.Flush(); err != nil { - return nil, "", err - } - } - - if err := wr.Close(); err != nil { - return nil, "", err - } - - reader := bytes.NewReader(target.Bytes()) - - // A tar builder that supports tarsum inline calculation would be awesome - // here. 
- ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1) - if err != nil { - return nil, "", err - } - - nn, err := io.Copy(ioutil.Discard, ts) - if nn != int64(len(target.Bytes())) { - return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes())) - } - - if err != nil { - return nil, "", err - } - - return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index a6e2eb8ab6df..dc7bfdca08b8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -167,8 +167,9 @@ type BlobType string // Types of page blobs const ( - BlobTypeBlock BlobType = "BlockBlob" - BlobTypePage BlobType = "PageBlob" + BlobTypeBlock BlobType = "BlockBlob" + BlobTypePage BlobType = "PageBlob" + BlobTypeAppend BlobType = "AppendBlob" ) // PageWriteType defines the type updates that are going to be @@ -330,7 +331,6 @@ func (b BlobStorageClient) createContainer(name string, access ContainerAccessTy uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) headers := b.client.getStandardHeaders() - headers["Content-Length"] = "0" if access != "" { headers["x-ms-blob-public-access"] = string(access) } @@ -541,17 +541,102 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope }, nil } +// SetBlobMetadata replaces the metadata for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string) error { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetBlobMetadata returns all user-defined metadata for the specified blob. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + metadata := make(map[string]string) + for k, v := range resp.headers { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. 
"_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata, nil +} + // CreateBlockBlob initializes an empty block blob with no blocks. // // See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx func (b BlobStorageClient) CreateBlockBlob(container, name string) error { + return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. +// +// The API rejects requests with size > 64 MiB (but this limit is not +// checked by the SDK). To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", 0) + headers["Content-Length"] = fmt.Sprintf("%d", size) - resp, err := b.client.exec("PUT", uri, headers, nil) + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) if err != nil { return err } @@ -562,26 +647,37 @@ func (b BlobStorageClient) CreateBlockBlob(container, name string) error { // PutBlock saves the given data chunk to the specified block blob with // given ID. // +// The API rejects chunks larger than 4 MiB (but this limit is not +// checked by the SDK). +// // See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { - return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk)) + return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) } // PutBlockWithLength saves the given data stream of exactly specified size to // the block blob with given ID. It is an alternative to PutBlocks where data // comes as stream but the length is known in advance. // +// The API rejects requests with size > 4 MiB (but this limit is not +// checked by the SDK). 
+// // See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader) error { +func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeBlock) headers["Content-Length"] = fmt.Sprintf("%v", size) + for k, v := range extraHeaders { + headers[k] = v + } + resp, err := b.client.exec("PUT", uri, headers, blob) if err != nil { return err } + defer resp.body.Close() return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -628,13 +724,16 @@ func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockL // be created using this method before writing pages. // // See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error { +func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypePage) headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) - headers["Content-Length"] = fmt.Sprintf("%v", 0) + + for k, v := range extraHeaders { + headers[k] = v + } resp, err := b.client.exec("PUT", uri, headers, nil) if err != nil { @@ -700,6 +799,48 @@ func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesR return out, err } +// PutAppendBlob initializes an empty append blob with specified name. An +// append blob must be created using this method before appending blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// AppendBlock appends a block to an append blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx +func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + // CopyBlob starts a blob copy operation and waits for the operation to // complete. sourceBlob parameter must be a canonical URL to the blob (can be // obtained using GetBlobURL method.) 
There is no SLA on blob copy and therefore @@ -719,7 +860,6 @@ func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (st uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) headers := b.client.getStandardHeaders() - headers["Content-Length"] = "0" headers["x-ms-copy-source"] = sourceBlob resp, err := b.client.exec("PUT", uri, headers, nil) @@ -850,6 +990,10 @@ func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Tim func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/blob" + canonicalizedResource + } + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx if signedVersion >= "2013-08-15" { return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 6c171050a2f6..df6c1a7b2e91 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -5,6 +5,7 @@ import ( "bytes" "encoding/base64" "encoding/xml" + "errors" "fmt" "io" "io/ioutil" @@ -12,6 +13,7 @@ import ( "net/url" "regexp" "sort" + "strconv" "strings" ) @@ -22,13 +24,14 @@ const ( // DefaultAPIVersion is the Azure Storage API version string used when a // basic client is created. - DefaultAPIVersion = "2014-02-14" + DefaultAPIVersion = "2015-02-21" defaultUseHTTPS = true blobServiceName = "blob" tableServiceName = "table" queueServiceName = "queue" + fileServiceName = "file" ) // Client is the object that needs to be constructed to perform @@ -79,6 +82,11 @@ func (e UnexpectedStatusCodeError) Error() string { return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) } +// Got is the actual status code returned by Azure. +func (e UnexpectedStatusCodeError) Got() int { + return e.got +} + // NewBasicClient constructs a Client with given storage service name and // key. func NewBasicClient(accountName, accountKey string) (Client, error) { @@ -154,6 +162,12 @@ func (c Client) GetQueueService() QueueServiceClient { return QueueServiceClient{c} } +// GetFileService returns a FileServiceClient which can operate on the file +// service of the storage account. 
+func (c Client) GetFileService() FileServiceClient { + return FileServiceClient{c} +} + func (c Client) createAuthorizationHeader(canonicalizedString string) string { signature := c.computeHmac256(canonicalizedString) return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature) @@ -252,18 +266,22 @@ func (c Client) buildCanonicalizedResource(uri string) (string, error) { } func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { + contentLength := headers["Content-Length"] + if contentLength == "0" { + contentLength = "" + } canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", verb, headers["Content-Encoding"], headers["Content-Language"], - headers["Content-Length"], + contentLength, headers["Content-MD5"], headers["Content-Type"], headers["Date"], - headers["If-Modified-Singe"], + headers["If-Modified-Since"], headers["If-Match"], headers["If-None-Match"], - headers["If-Unmodified-Singe"], + headers["If-Unmodified-Since"], headers["Range"], c.buildCanonicalizedHeader(headers), canonicalizedResource) @@ -283,6 +301,20 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader } req, err := http.NewRequest(verb, url, body) + if err != nil { + return nil, errors.New("azure/storage: error creating request: " + err.Error()) + } + + if clstr, ok := headers["Content-Length"]; ok { + // content length header is being signed, but completely ignored by golang. + // instead we have to use the ContentLength property on the request struct + // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and + // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) + req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) + if err != nil { + return nil, err + } + } for k, v := range headers { req.Header.Add(k, v) } @@ -344,7 +376,8 @@ func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStor } func (e AzureStorageServiceError) Error() string { - return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID) + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) } // checkRespCode returns UnexpectedStatusError if the given response code is not diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go new file mode 100644 index 000000000000..ede4e21be5c6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -0,0 +1,91 @@ +package storage + +import ( + "fmt" + "net/http" + "net/url" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client +} + +// pathForFileShare returns the URL path segment for a File Share resource +func pathForFileShare(name string) string { + return fmt.Sprintf("/%s", name) +} + +// CreateShare operation creates a new share under the specified account. If the +// share with the same name already exists, the operation fails. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShare(name string) error { + resp, err := f.createShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateShareIfNotExists creates a new share under the specified account if +// it does not exist. Returns true if container is newly created or false if +// container already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { + resp, err := f.createShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +// CreateShare creates a Azure File Share and returns its response +func (f FileServiceClient) createShare(name string) (*storageResponse, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + headers := f.client.getStandardHeaders() + return f.client.exec("PUT", uri, headers, nil) +} + +// DeleteShare operation marks the specified share for deletion. The share +// and any files contained within it are later deleted during garbage +// collection. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShare(name string) error { + resp, err := f.deleteShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteShareIfExists operation marks the specified share for deletion if it +// exists. The share and any files contained within it are later deleted during +// garbage collection. Returns true if share existed and deleted with this call, +// false otherwise. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { + resp, err := f.deleteShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// deleteShare makes the call to Delete Share operation endpoint and returns +// the response +func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index fa017f4c83e3..3ecf4aca0d21 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -6,6 +6,13 @@ import ( "net/http" "net/url" "strconv" + "strings" +) + +const ( + // casing is per Golang's http.Header canonicalizing the header names. 
+ approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" + userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" ) // QueueServiceClient contains operations for Microsoft Azure Queue Storage @@ -111,13 +118,82 @@ type PeekMessageResponse struct { MessageText string `xml:"MessageText"` } +// QueueMetadataResponse represents user defined metadata and queue +// properties on a specific queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +type QueueMetadataResponse struct { + ApproximateMessageCount int + UserDefinedMetadata map[string]string +} + +// SetMetadata operation sets user-defined metadata on the specified queue. +// Metadata is associated with the queue as name-value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx +func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMetadata operation retrieves user-defined metadata and queue +// properties on the specified queue. Metadata is associated with +// the queue as name-values pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +// +// Because the way Golang's http client (and http.Header in particular) +// canonicalize header names, the returned metadata names would always +// be all lower case. +func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { + qm := QueueMetadataResponse{} + qm.UserDefinedMetadata = make(map[string]string) + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("GET", uri, headers, nil) + if err != nil { + return qm, err + } + defer resp.body.Close() + + for k, v := range resp.headers { + if len(v) != 1 { + return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) + } + + value := v[0] + + if k == approximateMessagesCountHeader { + qm.ApproximateMessageCount, err = strconv.Atoi(value) + if err != nil { + return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) + } + } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { + name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) + qm.UserDefinedMetadata[strings.ToLower(name)] = value + } + } + + return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + // CreateQueue operation creates a queue under the given account. 
// // See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx func (c QueueServiceClient) CreateQueue(name string) error { uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) headers := c.client.getStandardHeaders() - headers["Content-Length"] = "0" resp, err := c.client.exec("PUT", uri, headers, nil) if err != nil { return err diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000000..5f14d1162ed4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000000..1d2c0d8b927e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,127 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with code, message, +// and original errors. 
Calling Error() will only return the error that is at the end +// of the list. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + if e, ok := origErr.(Error); ok && e != nil { + return e + } + return newBaseError(code, message, origErr) +} + +// NewBatchError returns an baseError with an expectation of an array of errors +func NewBatchError(code, message string, errs []error) BatchError { + return newBaseErrors(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Printf("Error:", err.Error() +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000000..605f73c5d6a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,197 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. 
+func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and err. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. +// +// origErr is the error object which will be nested under the new error to be returned. +func newBaseError(code, message string, origErr error) *baseError { + b := &baseError{ + code: code, + message: message, + } + + if origErr != nil { + b.errs = append(b.errs, origErr) + } + + return b +} + +// newBaseErrors returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. +// +// origErrs is the error objects which will be nested under the new errors to be returned. +func newBaseErrors(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no error +// was set. This only returns the first element in the list. If the full list is +// needed, use BatchError +func (b baseError) OrigErr() error { + if size := len(b.errs); size > 0 { + return b.errs[0] + } + + return nil +} + +// OrigErrs returns the original errors if one was set. An empty slice is returned if +// no error was set:w +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. 
+type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for request +// status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 000000000000..8429470b9d72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,100 @@ +package awsutil + +import ( + "io" + "reflect" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000000..59fa4a558a9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. 
+func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000000..4d2a01e8c41b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. +func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. 
+ value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. +func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. 
+func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000000..0de3eaa0f639 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,103 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. +func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = 
"buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 000000000000..b6432f1a1188 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 000000000000..c8d0564d8a8b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. 
+type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. +func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. 
+ r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000000..24d39ce5641e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,45 @@ +package client + +import ( + "math" + "math/rand" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// service.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() uint { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry returns if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000000..4778056ddfda --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. 
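Illustrative sketch, not part of the patch: DefaultRetryer.RetryRules above computes its delay as 2^RetryCount multiplied by a random 30-59 ms base, so attempt 0 waits roughly 30-59 ms, attempt 1 roughly 60-118 ms, and so on. The standalone function below mirrors that formula; the name backoff is ours, not the SDK's.

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// backoff mirrors DefaultRetryer.RetryRules: exponential growth with a
// randomized 30-59 ms base to spread retries from many clients apart.
func backoff(retryCount int) time.Duration {
	delay := int(math.Pow(2, float64(retryCount))) * (rand.Intn(30) + 30)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	for i := 0; i < 4; i++ {
		fmt.Printf("retry %d: %v\n", i, backoff(i))
	}
}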
+type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000000..9e83e9260a19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,311 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// UseServiceDefaultRetries instructs the config to use the service's own default +// number of retries. This will be the default action if Config.MaxRetries +// is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the {defaults.DefaultConfig} structure. +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to retreive + // credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to + // a chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service specific + // configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. 
+ // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for missing + // required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will + // use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata + // client to create a new http.Client. This options is only meaningful if you're not + // already using a custom HTTP client with the SDK. Enabled by default. + // + // Must be set and provided to the session.New() in order to disable the EC2Metadata + // overriding the timeout for default credentials chain. + // + // Example: + // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder methods to +// set multiple configuration values inline without using pointers. +// +// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. 
+func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
+func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000000..d6a7b08dffe4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,357 @@ +package aws + +import "time" + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
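Editorial sketch, not part of the vendored config.go: the chainable With* setters, Copy, and MergeIn compose as shown below. The region, retry, and timeout values are placeholders.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Each With* setter mutates the receiver and returns it, so calls chain.
	base := aws.NewConfig().
		WithRegion("us-west-2").
		WithMaxRetries(10).
		WithHTTPClient(&http.Client{Timeout: 30 * time.Second})

	// Copy is a shallow copy plus MergeIn of the extra configs: only fields
	// that are set (non-nil) in the override replace the base values.
	override := base.Copy(aws.NewConfig().WithRegion("eu-west-1"))

	fmt.Println(aws.StringValue(base.Region))      // us-west-2
	fmt.Println(aws.StringValue(override.Region))  // eu-west-1
	fmt.Println(aws.IntValue(override.MaxRetries)) // 10, carried over from base
}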
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 000000000000..1d3e656fd68e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,139 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. 
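Another small illustration (ours, not the SDK's): the scalar, slice, and map helpers in convert_types.go round-trip between values and the pointer shapes SDK input structs expect, with nil pointers mapping to zero values.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Scalars: value -> pointer and back; a nil pointer yields the zero value.
	fmt.Println(aws.StringValue(aws.String("example"))) // "example"
	fmt.Println(aws.StringValue(nil))                   // ""
	fmt.Println(aws.Int64Value(aws.Int64(42)))          // 42

	// Slices and maps convert element-wise in both directions.
	ptrs := aws.StringSlice([]string{"a", "b"})
	fmt.Println(aws.StringValueSlice(ptrs))                                // [a b]
	fmt.Println(aws.BoolValueMap(aws.BoolMap(map[string]bool{"x": true}))) // map[x:true]
}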
+type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ := strconv.ParseInt(slength, 10, 64) + r.HTTPRequest.ContentLength = length + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. 
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000000..3b53f5e026a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,144 @@ +package corehandlers + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if r.ParamsFilled() { + v := validator{errors: []string{}} + v.validateAny(reflect.ValueOf(r.Params), "") + + if count := len(v.errors); count > 0 { + format := "%d validation errors:\n- %s" + msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) + r.Error = awserr.New("InvalidParameter", msg, nil) + } + } +}} + +// A validator validates values. Collects validations errors which occurs. +type validator struct { + errors []string +} + +// validateAny will validate any struct, slice or map type. All validations +// are also performed recursively for nested types. +func (v *validator) validateAny(value reflect.Value, path string) { + value = reflect.Indirect(value) + if !value.IsValid() { + return + } + + switch value.Kind() { + case reflect.Struct: + v.validateStruct(value, path) + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) + } + case reflect.Map: + for _, n := range value.MapKeys() { + v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + } + } +} + +// validateStruct will validate the struct value's fields. If the structure has +// nested types those types will be validated also. +func (v *validator) validateStruct(value reflect.Value, path string) { + prefix := "." 
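Hedged sketch of how these core handlers are meant to be combined: defaults.Handlers(), added later in this diff, seeds the per-phase lists, and callers can push their own request.NamedHandler entries alongside them. The handler name "example.LogOperation" is made up for illustration.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	// Start from the SDK's default handler lists and append a named handler
	// that runs after core.SendHandler for every request.
	handlers := defaults.Handlers()
	handlers.Send.PushBackNamed(request.NamedHandler{
		Name: "example.LogOperation", // illustrative name, not an SDK handler
		Fn: func(r *request.Request) {
			fmt.Println("sent:", r.Operation.Name)
		},
	})
	_ = handlers
}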
+ if path == "" { + prefix = "" + } + + for i := 0; i < value.Type().NumField(); i++ { + f := value.Type().Field(i) + if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { + continue + } + fvalue := value.FieldByName(f.Name) + + err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) + if err != nil { + v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) + continue + } + + v.validateAny(fvalue, path+prefix+f.Name) + } +} + +type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error + +func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { + for _, fn := range funcs { + if err := fn(f, fvalue); err != nil { + return err + } + } + return nil +} + +// Validates that a field has a valid value provided for required fields. +func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { + if f.Tag.Get("required") == "" { + return nil + } + + switch fvalue.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return fmt.Errorf("missing required parameter") + } + default: + if !fvalue.IsValid() { + return fmt.Errorf("missing required parameter") + } + } + return nil +} + +// Validates that if a value is provided for a field, that value must be at +// least a minimum length. +func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { + minStr := f.Tag.Get("min") + if minStr == "" { + return nil + } + min, _ := strconv.ParseInt(minStr, 10, 64) + + kind := fvalue.Kind() + if kind == reflect.Ptr { + if fvalue.IsNil() { + return nil + } + fvalue = fvalue.Elem() + } + + switch fvalue.Kind() { + case reflect.String: + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + case reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return nil + } + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + + // TODO min can also apply to number minimum value. + + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 000000000000..857311f64cc4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. 
+// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// vai the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := NewChainCredentials( +// []Provider{ +// &EnvProvider{}, +// &EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(&aws.Config{Credentials: creds}) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 000000000000..7b8ebf5f9d87 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. 
+// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. 
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. 
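A hedged expansion of the MyProvider skeleton from the package comment above: embedding credentials.Expiry supplies IsExpired, and Retrieve records the next expiry (minus a refresh window) via SetExpiration. The provider name, lifetime, and key values are placeholders.

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// tokenProvider is an illustrative custom Provider; a real one would fetch
// short-lived keys from some token service in Retrieve.
type tokenProvider struct {
	credentials.Expiry
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	// Expire one minute before the nominal 15 minute lifetime ends so no
	// request is signed with keys about to become invalid.
	p.SetExpiration(time.Now().Add(15*time.Minute), time.Minute)
	return credentials.Value{
		AccessKeyID:     "AKID",
		SecretAccessKey: "SECRET",
		SessionToken:    "TOKEN",
		ProviderName:    "tokenProvider",
	}, nil
}

func main() {
	creds := credentials.NewCredentials(&tokenProvider{})
	_, _ = creds.Get() // cached until IsExpired reports true
}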
+func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000000..36f8add35edc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. 
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 Role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. 
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000000..96655bc46ae3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,77 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000000..7fc91d9d2047 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000000..7fb7cbf0db00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. 
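Sketch of typical use, not part of the vendored file: an empty filename falls back to AWS_SHARED_CREDENTIALS_FILE and then the home-directory default, and an empty profile falls back to AWS_PROFILE and then "default". The "no_token" profile name comes from the example.ini added above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Load the "no_token" profile from the default shared credentials file.
	creds := credentials.NewSharedCredentials("", "no_token")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println(v.AccessKeyID, v.ProviderName)
}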
+func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 000000000000..71189e733dc9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,48 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set pragmatically, +// and will never expire. 
+type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + s.Value.ProviderName = StaticProviderName + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 000000000000..42c883aa9fa6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,96 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. +package defaults + +import ( + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithSleepDelay(time.Sleep) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. 
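
Editor's note: the static provider above is the simplest case: fixed keys that never rotate, so IsExpired is always false. A sketch of using it (illustrative only; the key values are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// NewStaticCredentials wraps a StaticProvider in a Credentials object.
	creds := credentials.NewStaticCredentials("AKIDEXAMPLE", "secretExample", "")
	_ = creds

	// Or drive the provider directly.
	p := &credentials.StaticProvider{Value: credentials.Value{
		AccessKeyID:     "AKIDEXAMPLE",
		SecretAccessKey: "secretExample",
	}}
	v, err := p.Retrieve()
	fmt.Println(v.ProviderName, p.IsExpired(), err) // StaticProvider false <nil>
}
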
+func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) + + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + }, + }}) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 000000000000..e5137ca17da9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,43 @@ +package ec2metadata + +import ( + "path" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// GetMetadata uses the path provided to request +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 000000000000..9e16c1cf7e3b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,117 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. 
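
Editor's note: CredChain above wires the environment, shared-file, and EC2-role providers together in that order, and the EC2 role provider rides on the metadata client defined next. A sketch of building the default chain and probing the metadata service (illustrative only; assumes the code may or may not be running on an EC2 instance):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	d := defaults.Get()
	// Env vars first, then ~/.aws/credentials, then the EC2 instance role.
	creds := defaults.CredChain(d.Config, d.Handlers)
	_ = creds

	meta := ec2metadata.New(session.New())
	if !meta.Available() {
		fmt.Println("not on EC2, or metadata service unreachable")
		return
	}
	region, err := meta.Region()
	fmt.Println("region:", region, "err:", err)
}
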
+package ec2metadata + +import ( + "io/ioutil" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. +const ServiceName = "ec2metadata" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + } + + data := r.Data.(*metadataOutput) + data.Content = string(b) +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + _, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + } + + // TODO extract the error... 
+} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000000..57663616868f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 000000000000..f5369487384e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,98 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. 
This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 000000000000..3e90a7976aef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,140 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + var n HandlerList + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. 
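
Editor's note: the log level is a bit field; the sub-levels above all imply LogDebug, and Matches tests individual bits, so callers can enable exactly the output they need. A short sketch (illustrative only):

package main

import "github.com/aws/aws-sdk-go/aws"

func main() {
	cfg := aws.NewConfig().
		WithLogger(aws.NewDefaultLogger()).
		WithLogLevel(aws.LogDebugWithHTTPBody) // implies LogDebug as well

	if cfg.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
		cfg.Logger.Log("request/response bodies will be logged")
	}
	if cfg.LogLevel.AtLeast(aws.LogDebug) {
		cfg.Logger.Log("debug logging is on")
	}
}
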
+func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for _, f := range l.list { + f.Fn(r) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000000..61bb4a8ca757 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,294 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + + built bool +} + +// An Operation is the service API operation to be made. 
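
Editor's note: handler lists are how every request phase is extended; named handlers can later be removed or replaced by name, and MakeAddToUserAgentHandler is a ready-made Build handler. A sketch of composing a list (illustrative; the handler names here are arbitrary):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var hs request.Handlers

	// A named handler can be removed again with hs.Build.Remove(...).
	ua := request.NamedHandler{
		Name: "example.UserAgent",
		Fn:   request.MakeAddToUserAgentHandler("my-tool", "0.1", "linux"),
	}
	hs.Build.PushBackNamed(ua)

	// Anonymous handlers are registered under "__anonymous".
	hs.Build.PushBack(func(r *request.Request) {
		request.AddToUserAgent(r, "extra/1.0")
	})

	fmt.Println("build handlers:", hs.Build.Len()) // 2
}
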
+type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator +} + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + p := operation.HTTPPath + if p == "" { + p = "/" + } + + httpReq, _ := http.NewRequest(method, "", nil) + httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p) + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: nil, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.HTTPRequest.Body = ioutil.NopCloser(reader) + r.Body = reader +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. +func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.NotHoist = false + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +// PresignRequest behaves just like presign, but hoists all headers and signs them. 
+// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Error = nil + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + r.built = true + } + + return r.Error +} + +// Sign will sign the request retuning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +func (r *Request) Send() error { + for { + r.Sign() + if r.Error != nil { + return r.Error + } + + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // Re-seek the body back to the original point in for a retry so that + // send will send the body's contents again in the upcoming request. 
+ r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000000..2939ec473f2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. 
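
Editor's note: Send drives the whole retry loop above, and the pagination helpers chain Send calls page by page. The sketch below walks pages by hand, which is essentially what EachPage does internally (drainPages and the process callback are hypothetical names, not SDK API):

package awsexample

import "github.com/aws/aws-sdk-go/aws/request"

// drainPages sends req, then keeps requesting the next page until the
// operation's output no longer carries pagination tokens.
func drainPages(req *request.Request, process func(interface{})) error {
	for page := req; page != nil; page = page.NextPage() {
		if err := page.Send(); err != nil {
			return err
		}
		process(page.Data)
	}
	return nil
}
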
+func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000000..ab6fff5ac842 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,82 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. 
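
Editor's note: any type with RetryRules, ShouldRetry, and MaxRetries satisfies Retryer, and WithRetryer installs it on a config. This sketch swaps in a flat-delay retryer (fixedRetryer is a hypothetical name, not part of the SDK):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
)

// fixedRetryer retries up to three times with a flat 200ms delay, reusing
// the request's own error classification.
type fixedRetryer struct{}

func (fixedRetryer) MaxRetries() int                           { return 3 }
func (fixedRetryer) RetryRules(*request.Request) time.Duration { return 200 * time.Millisecond }
func (fixedRetryer) ShouldRetry(r *request.Request) bool       { return r.IsErrorRetryable() }

func main() {
	cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{})
	_ = cfg
}
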
+var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000000..47e4536ff1cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,120 @@ +// Package session provides a way to create service clients with shared configuration +// and handlers. +// +// Generally this package should be used instead of the `defaults` package. +// +// A session should be used to share configurations and request handlers between multiple +// service clients. When service clients need specific configuration aws.Config can be +// used to provide additional configuration directly to the service client. +package session + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/endpoints" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the session concurrently. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided Configs +// on top of the SDK's default configurations. Once the session is created it +// can be mutated to modify Configs or Handlers. The session is safe to be read +// concurrently, but it should not be written to concurrently. +// +// Example: +// // Create a session with the default config and request handlers. +// sess := session.New() +// +// // Create a session with a custom region +// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) +// +// // Create a session, and add additional handlers for all service +// // clients created with the session to inherit. Adds logging handler. 
+// sess := session.New() +// sess.Handlers.Send.PushFront(func(r *request.Request) { +// // Log every request made and its payload +// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) +// }) +// +// // Create a S3 client instance from a session +// sess := session.New() +// svc := s3.New(sess) +func New(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the session's copied config. +// +// Example: +// // Create a copy of the current session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2"}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +// +// Example: +// sess := session.New() +// s3.New(sess) +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + endpoint, signingRegion := endpoints.NormalizeEndpoint( + aws.StringValue(s.Config.Endpoint), serviceName, + aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: endpoint, + SigningRegion: signingRegion, + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 000000000000..0f067c57f4e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,88 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. 
+// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + expLen := pos + int64(len(p)) + if int64(len(b.buf)) < expLen { + newBuf := make([]byte, expLen) + copy(newBuf, b.buf) + b.buf = newBuf + } + copy(b.buf[pos:], p) + return len(p), nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf[:len(b.buf):len(b.buf)] +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000000..ad215ea8ebca --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.1.0" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go new file mode 100644 index 000000000000..2b279e65999a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -0,0 +1,65 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. 
+// If the endpoint is provided the scheme will be added if it is not present. +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. +func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { + derivedKeys := []string{ + region + "/" + svcName, + region + "/*", + "*/" + svcName, + "*/*", + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. +func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 000000000000..7819eedc39e3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,92 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest", + "signingRegion": "us-east-1" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "us-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-west-2/s3": { + 
"endpoint": "s3-{region}.amazonaws.com" + }, + "eu-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "sa-east-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com", + "signatureVersion": "v4" + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 000000000000..9b2e1b699b24 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,104 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + SigningRegion: "us-east-1", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "ap-northeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-northeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "eu-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "sa-east-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + "us-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-west-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + }, +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 
100644 index 000000000000..53831dff9842 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000000..56d69db05c17 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
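
Editor's note: GetIdempotencyToken above simply formats sixteen random bytes as a version-4 UUID. These private/... packages are internal to the SDK, so the sketch is illustrative rather than something external code would normally import:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// Same shape GetIdempotencyToken produces: 8-4-4-4-12 hex groups, with the
	// version nibble forced to 4 and the variant nibble to 8..B.
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		fmt.Println("rand failed:", err)
		return
	}
	fmt.Println(protocol.UUIDVersion4(b))

	// Or let the package do both steps.
	fmt.Println(protocol.GetIdempotencyToken())
}
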
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000000..60ea0bd1e5fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000000..a3ea40955d7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000000..b8e3c7a31254 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,40 @@ +package query + +import ( + "encoding/xml" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) + } else { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000000..5469edb60b1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,257 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. 
+func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + 
return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + buf.WriteByte('%') + buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16))) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000000..4366de2e1e8f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000000..ba46f5bc9fae --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,193 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := 
field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 000000000000..c74088bfe807 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialisation of AWS +// requests and responses. 
+package restxml + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 000000000000..da1a68111db4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. 
+func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000000..ceb4132c5355 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,293 @@ +// Package xmlutil provides XML serialisation of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// types are converted to XMLNodes also. 
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + fieldAdded := false + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + + fieldAdded = true + } + + if fieldAdded { // only append this child if we have one ore more valid members + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000000..49f291a857b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. 
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000000..72c198a9d8d0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go new file mode 100644 index 000000000000..244c86da0543 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go new file mode 100644 index 000000000000..14b0c6616112 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go @@ -0,0 +1,438 @@ +// Package v4 implements signing for AWS V4 signer +package v4 + +import ( + "crypto/hmac" + 
"crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Content-Length": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + CredValues credentials.Value + Credentials *credentials.Credentials + Query url.Values + Body io.ReadSeeker + Debug aws.LogLevelType + Logger aws.Logger + + isPresign bool + formattedTime string + formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string + notHoist bool + signedHeaderVals http.Header +} + +// Sign requests with signature version 4. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + s := signer{ + Request: req.HTTPRequest, + Time: req.Time, + ExpireTime: req.ExpireTime, + Query: req.HTTPRequest.URL.Query(), + Body: req.Body, + ServiceName: name, + Region: region, + Credentials: req.Config.Credentials, + Debug: req.Config.LogLevel.Value(), + Logger: req.Config.Logger, + notHoist: req.NotHoist, + } + + req.Error = s.sign() + req.SignedHeaderVals = s.signedHeaderVals +} + +func (v4 *signer) sign() error { + if v4.ExpireTime != 0 { + v4.isPresign = true + } + + if v4.isRequestSigned() { + if !v4.Credentials.IsExpired() { + // If the request is already signed, and the credentials have not + // expired yet ignore the signing request. + return nil + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + if v4.isPresign { + v4.removePresign() + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + v4.Request.URL.RawQuery = v4.Query.Encode() + } + } + + var err error + v4.CredValues, err = v4.Credentials.Get() + if err != nil { + return err + } + + if v4.isPresign { + v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if v4.CredValues.SessionToken != "" { + v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } else { + v4.Query.Del("X-Amz-Security-Token") + } + } else if v4.CredValues.SessionToken != "" { + v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) + } + + v4.build() + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *signer) logSigningInfo() { + signedURLMsg := "" + if v4.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (v4 *signer) build() { + + v4.buildTime() // no depends + v4.buildCredentialString() // no depends + + unsignedHeaders := v4.Request.Header + if v4.isPresign { + if !v4.notHoist { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + v4.Query[k] = urlValues[k] + } + } + } + + v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + v4.buildCanonicalString() // depends on canon headers / signed headers + v4.buildStringToSign() // depends on canon string + v4.buildSignature() // depends on string to sign + + if v4.isPresign { + v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, + "SignedHeaders=" + v4.signedHeaders, + "Signature=" + v4.signature, + } + v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (v4 *signer) buildTime() { + v4.formattedTime = 
v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + headers = append(headers, lowerCaseKey) + + if v4.signedHeaderVals == nil { + v4.signedHeaderVals = make(http.Header) + } + v4.signedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if v4.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.CredValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +// isRequestSigned returns if the 
request is currently signed or presigned +func (v4 *signer) isRequestSigned() bool { + if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { + return true + } + if v4.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (v4 *signer) removePresign() { + v4.Query.Del("X-Amz-Algorithm") + v4.Query.Del("X-Amz-Signature") + v4.Query.Del("X-Amz-Security-Token") + v4.Query.Del("X-Amz-Date") + v4.Query.Del("X-Amz-Expires") + v4.Query.Del("X-Amz-Credential") + v4.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go new file mode 100644 index 000000000000..b51e9449c105 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -0,0 +1,134 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. 
+func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + + err := req.Send() + for _, a := range w.Acceptors { + result := false + var vals []interface{} + switch a.Matcher { + case "pathAll", "path": + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case "pathAny": + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case "error": + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + case "pathList": + // ignored matcher + default: + logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", + w.Config.Operation, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + continue + } + + switch a.State { + case "success": + // waiter completed + return nil + case "failure": + // Waiter failure state triggered + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), err) + case "retry": + // clear the error and retry the operation + err = nil + default: + logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", + w.Config.Operation, a.State) + } + } + if err != nil { + return err + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} + +func logf(client reflect.Value, msg string, args ...interface{}) { + cfgVal := client.FieldByName("Config") + if !cfgVal.IsValid() { + return + } + if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { + cfg.Logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go new file mode 100644 index 000000000000..8d392eb89fc7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go @@ -0,0 +1,210 @@ +package sign + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + "time" +) + +// An AWSEpochTime wraps a time value providing JSON serialization needed for +// AWS Policy epoch time fields. +type AWSEpochTime struct { + time.Time +} + +// NewAWSEpochTime returns a new AWSEpochTime pointer wrapping the Go time provided. +func NewAWSEpochTime(t time.Time) *AWSEpochTime { + return &AWSEpochTime{t} +} + +// MarshalJSON serializes the epoch time as AWS Profile epoch time. 
+func (t AWSEpochTime) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`{"AWS:EpochTime":%d}`, t.UTC().Unix())), nil +} + +// An IPAddress wraps an IPAddress source IP providing JSON serialization information +type IPAddress struct { + SourceIP string `json:"AWS:SourceIp"` +} + +// A Condition defines the restrictions for how a signed URL can be used. +type Condition struct { + // Optional IP address mask the signed URL must be requested from. + IPAddress *IPAddress `json:"IpAddress,omitempty"` + + // Optional date that the signed URL cannot be used until. It is invalid + // to make requests with the signed URL prior to this date. + DateGreaterThan *AWSEpochTime `json:",omitempty"` + + // Required date that the signed URL will expire. A DateLessThan is required + // to sign CloudFront URLs. + DateLessThan *AWSEpochTime `json:",omitempty"` +} + +// A Statement is a collection of conditions for resources +type Statement struct { + // The Web or RTMP resource the URL will be signed for + Resource string + + // The set of conditions for this resource + Condition Condition +} + +// A Policy defines the resources that a signed URL will be signed for. +// +// See the following page for more information on how policies are constructed. +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement +type Policy struct { + // List of resource and condition statements. + // Signed URLs should only provide a single statement. + Statements []Statement `json:"Statement"` +} + +// Override for testing to mock out usage of crypto/rand.Reader +var randReader = rand.Reader + +// Sign will sign a policy using an RSA private key. It will return a base 64 +// encoded signature and policy if no error is encountered. +// +// The signature and policy should be added to the signed URL following the +// guidelines in: +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-signed-urls.html +func (p *Policy) Sign(privKey *rsa.PrivateKey) (b64Signature, b64Policy []byte, err error) { + if err = p.Validate(); err != nil { + return nil, nil, err + } + + // Build and escape the policy + b64Policy, jsonPolicy, err := encodePolicy(p) + if err != nil { + return nil, nil, err + } + awsEscapeEncoded(b64Policy) + + // Build and escape the signature + b64Signature, err = signEncodedPolicy(randReader, jsonPolicy, privKey) + if err != nil { + return nil, nil, err + } + awsEscapeEncoded(b64Signature) + + return b64Signature, b64Policy, nil +} + +// Validate verifies that the policy is valid and usable, and returns an +// error if there is a problem. +func (p *Policy) Validate() error { + if len(p.Statements) == 0 { + return fmt.Errorf("at least one policy statement is required") + } + for i, s := range p.Statements { + if s.Resource == "" { + return fmt.Errorf("statement at index %d does not have a resource", i) + } + } + + return nil +} + +// CreateResource constructs, validates, and returns a resource URL string. An +// error will be returned if unable to create the resource string.
+func CreateResource(scheme, u string) (string, error) { + scheme = strings.ToLower(scheme) + + if scheme == "http" || scheme == "https" { + return u, nil + } + + if scheme == "rtmp" { + parsed, err := url.Parse(u) + if err != nil { + return "", fmt.Errorf("unable to parse rtmp URL, err: %s", err) + } + + rtmpURL := strings.TrimLeft(parsed.Path, "/") + if parsed.RawQuery != "" { + rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) + } + + return rtmpURL, nil + } + + return "", fmt.Errorf("invalid URL scheme must be http, https, or rtmp. Provided: %s", scheme) +} + +// NewCannedPolicy returns a new Canned Policy constructed using the resource +// and expires time. This can be used to generate the basic model for a Policy +// that can then be augmented with additional conditions. +// +// See the following page for more information on how policies are constructed. +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement +func NewCannedPolicy(resource string, expires time.Time) *Policy { + return &Policy{ + Statements: []Statement{ + { + Resource: resource, + Condition: Condition{ + DateLessThan: NewAWSEpochTime(expires), + }, + }, + }, + } +} + +// encodePolicy encodes the Policy as JSON and also base 64 encodes it. +func encodePolicy(p *Policy) (b64Policy, jsonPolicy []byte, err error) { + jsonPolicy, err = json.Marshal(p) + if err != nil { + return nil, nil, fmt.Errorf("failed to encode policy, %s", err.Error()) + } + + // Remove leading and trailing white space, JSON encoding will not include + // whitespace within the encoding. + jsonPolicy = bytes.TrimSpace(jsonPolicy) + + b64Policy = make([]byte, base64.StdEncoding.EncodedLen(len(jsonPolicy))) + base64.StdEncoding.Encode(b64Policy, jsonPolicy) + return b64Policy, jsonPolicy, nil +} + +// signEncodedPolicy will sign and base 64 encode the JSON encoded policy. +func signEncodedPolicy(randReader io.Reader, jsonPolicy []byte, privKey *rsa.PrivateKey) ([]byte, error) { + hash := sha1.New() + if _, err := bytes.NewReader(jsonPolicy).WriteTo(hash); err != nil { + return nil, fmt.Errorf("failed to calculate signing hash, %s", err.Error()) + } + + sig, err := rsa.SignPKCS1v15(randReader, privKey, crypto.SHA1, hash.Sum(nil)) + if err != nil { + return nil, fmt.Errorf("failed to sign policy, %s", err.Error()) + } + + b64Sig := make([]byte, base64.StdEncoding.EncodedLen(len(sig))) + base64.StdEncoding.Encode(b64Sig, sig) + return b64Sig, nil +} + +// special characters to be replaced with awsEscapeEncoded +var invalidEncodedChar = map[byte]byte{ + '+': '-', + '=': '_', + '/': '~', +} + +// awsEscapeEncoded will replace base64 encoding's special characters to be URL safe.
+func awsEscapeEncoded(b []byte) { + for i, v := range b { + if r, ok := invalidEncodedChar[v]; ok { + b[i] = r + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go new file mode 100644 index 000000000000..ffb3c3a751d7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go @@ -0,0 +1,68 @@ +package sign + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "os" +) + +// LoadPEMPrivKeyFile reads a PEM encoded RSA private key from the file name. +// A new RSA private key will be returned if no error occurs. +func LoadPEMPrivKeyFile(name string) (*rsa.PrivateKey, error) { + file, err := os.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + + return LoadPEMPrivKey(file) +} + +// LoadPEMPrivKey reads a PEM encoded RSA private key from the io.Reader. +// A new RSA private key will be returned if no error occurs. +func LoadPEMPrivKey(reader io.Reader) (*rsa.PrivateKey, error) { + block, err := loadPem(reader) + if err != nil { + return nil, err + } + + return x509.ParsePKCS1PrivateKey(block.Bytes) +} + +// LoadEncryptedPEMPrivKey decrypts the PEM encoded private key using the +// password provided, returning an RSA private key. If the PEM data is invalid +// or cannot be decrypted, an error will be returned. +func LoadEncryptedPEMPrivKey(reader io.Reader, password []byte) (*rsa.PrivateKey, error) { + block, err := loadPem(reader) + if err != nil { + return nil, err + } + + decryptedBlock, err := x509.DecryptPEMBlock(block, password) + if err != nil { + return nil, err + } + + return x509.ParsePKCS1PrivateKey(decryptedBlock) +} + +func loadPem(reader io.Reader) (*pem.Block, error) { + b, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(b) + if block == nil { + // pem.Decode will set block to nil if there is no PEM data in the input; + // the second parameter will contain the provided bytes that failed + // to be decoded. + return nil, fmt.Errorf("no valid PEM data provided") + } + + return block, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go new file mode 100644 index 000000000000..7138e22fa021 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go @@ -0,0 +1,30 @@ +package sign + +import ( + "bytes" + "encoding/binary" + "math/rand" +) + +// A randomReader wraps a math/rand.Rand within a reader so that it can be used +// as a predictable testing replacement for crypto/rand.Reader +type randomReader struct { + b *bytes.Buffer + r *rand.Rand +} + +// newRandomReader returns a new instance of the random reader +func newRandomReader(r *rand.Rand) *randomReader { + return &randomReader{b: &bytes.Buffer{}, r: r} +} + +// Read will read random bytes up to the length of b.
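Tying privkey.go and policy.go together, here is a minimal sketch of loading a key and signing a canned policy by hand; the key path and resource URL are made-up values, and the canonical aws-sdk-go import path is assumed rather than the vendored one above.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	// Hypothetical key file; LoadPEMPrivKeyFile is defined in privkey.go above.
	privKey, err := sign.LoadPEMPrivKeyFile("/path/to/cloudfront-private-key.pem")
	if err != nil {
		log.Fatalf("failed to load private key: %v", err)
	}

	// Canned policy: a single statement whose only condition is the expiry time.
	policy := sign.NewCannedPolicy("https://d111111abcdef8.cloudfront.net/private.mp4",
		time.Now().Add(1*time.Hour))

	// Sign returns the URL-safe base64 signature and policy (see awsEscapeEncoded).
	b64Sig, b64Policy, err := policy.Sign(privKey)
	if err != nil {
		log.Fatalf("failed to sign policy: %v", err)
	}
	fmt.Printf("Signature=%s\nPolicy=%s\n", b64Sig, b64Policy)
}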
+func (m *randomReader) Read(b []byte) (int, error) { + for i := 0; i < len(b); { + binary.Write(m.b, binary.LittleEndian, m.r.Int63()) + n, _ := m.b.Read(b[i:]) + i += n + } + + return len(b), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go new file mode 100644 index 000000000000..62406bbb7b4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go @@ -0,0 +1,205 @@ +// Package sign provides utilities to generate signed URLs for Amazon CloudFront. +// +// More information about signed URLs and their structure can be found at: +// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html +// +// To sign a URL, create a URLSigner with your private key and credential pair key ID. +// Once you have a URLSigner instance, you can call Sign or SignWithPolicy to +// sign the URLs. +// +// Example: +// +// // Sign URL to be valid for 1 hour from now. +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour)) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +package sign + +import ( + "crypto/rsa" + "fmt" + "net/url" + "strings" + "time" +) + +// An URLSigner provides URL signing utilities to sign URLs for Amazon CloudFront +// resources. Using a private key and Credential Key Pair key ID the URLSigner +// only needs to be created once per Credential Key Pair key ID and private key. +// +// The signer is safe to use concurrently. +type URLSigner struct { + keyID string + privKey *rsa.PrivateKey +} + +// NewURLSigner constructs and returns a new URLSigner to be used for signing +// Amazon CloudFront URL resources with. +func NewURLSigner(keyID string, privKey *rsa.PrivateKey) *URLSigner { + return &URLSigner{ + keyID: keyID, + privKey: privKey, + } +} + +// Sign will sign a single URL to expire at the time of expires, using the +// Amazon CloudFront default Canned Policy. The URL will be signed with the +// private key and Credential Key Pair Key ID previously provided to URLSigner. +// +// This is the default method of signing Amazon CloudFront URLs. If extra policy +// conditions are needed other than URL expiry, use SignWithPolicy instead. +// +// Example: +// +// // Sign URL to be valid for 1 hour from now. +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour)) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +func (s URLSigner) Sign(url string, expires time.Time) (string, error) { + scheme, cleanedURL, err := cleanURLScheme(url) + if err != nil { + return "", err + } + + resource, err := CreateResource(scheme, url) + if err != nil { + return "", err + } + + return signURL(scheme, cleanedURL, s.keyID, NewCannedPolicy(resource, expires), false, s.privKey) +} + +// SignWithPolicy will sign a URL with the Policy provided. The URL will be +// signed with the private key and Credential Key Pair Key ID previously provided to URLSigner. +// +// Use this signing method if you are looking to sign a URL with more than just +// the URL's expiry time, or reusing Policies between multiple URL signings.
+// If only the expiry time is needed, you can use Sign and provide just the +// URL's expiry time. At least one policy statement is required for a signed URL. +// +// Note: It is not safe to use Policies between multiple signers concurrently. +// +// Example: +// +// // Sign URL to become valid 30 minutes from now, expire one hour from now, and +// // restricted to the 192.0.2.0/24 IP address range. +// policy := &sign.Policy{ +// Statements: []Statement{ +// { +// Resource: rawURL, +// Condition: Condition{ +// // Optional IP source address range +// IPAddress: &IPAddress{SourceIP: "192.0.2.0/24"}, +// // Optional date URL is not valid until +// DateGreaterThan: &AWSEpochTime{time.Now().Add(30 * time.Minute)}, +// // Required date the URL will expire after +// DateLessThan: &AWSEpochTime{time.Now().Add(1 * time.Hour)}, +// } +// } +// } +// } +// +// signer := sign.NewURLSigner(keyID, privKey) +// signedURL, err := signer.SignWithPolicy(rawURL, policy) +// if err != nil { +// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) +// } +// +func (s URLSigner) SignWithPolicy(url string, p *Policy) (string, error) { + scheme, cleanedURL, err := cleanURLScheme(url) + if err != nil { + return "", err + } + + return signURL(scheme, cleanedURL, s.keyID, p, true, s.privKey) +} + +func signURL(scheme, url, keyID string, p *Policy, customPolicy bool, privKey *rsa.PrivateKey) (string, error) { + // Validate URL elements + if err := validateURL(url); err != nil { + return "", err + } + + b64Signature, b64Policy, err := p.Sign(privKey) + if err != nil { + return "", err + } + + // build and return signed URL + builtURL := buildSignedURL(url, keyID, p, customPolicy, b64Policy, b64Signature) + if scheme == "rtmp" { + return buildRTMPURL(builtURL) + } + + return builtURL, nil +} + +func buildSignedURL(baseURL, keyID string, p *Policy, customPolicy bool, b64Policy, b64Signature []byte) string { + pred := "?"
+ if strings.Contains(baseURL, "?") { + pred = "&" + } + signedURL := baseURL + pred + + if customPolicy { + signedURL += "Policy=" + string(b64Policy) + } else { + signedURL += fmt.Sprintf("Expires=%d", p.Statements[0].Condition.DateLessThan.UTC().Unix()) + } + signedURL += fmt.Sprintf("&Signature=%s&Key-Pair-Id=%s", string(b64Signature), keyID) + + return signedURL +} + +func buildRTMPURL(u string) (string, error) { + parsed, err := url.Parse(u) + if err != nil { + return "", fmt.Errorf("unable to parse rtmp signed URL, err: %s", err) + } + + rtmpURL := strings.TrimLeft(parsed.Path, "/") + if parsed.RawQuery != "" { + rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) + } + + return rtmpURL, nil +} + +func cleanURLScheme(u string) (scheme, cleanedURL string, err error) { + parts := strings.SplitN(u, "://", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid URL, missing scheme and domain/path") + } + scheme = strings.Replace(parts[0], "*", "", 1) + cleanedURL = fmt.Sprintf("%s://%s", scheme, parts[1]) + + return strings.ToLower(scheme), cleanedURL, nil +} + +var illegalQueryParms = []string{"Expires", "Policy", "Signature", "Key-Pair-Id"} + +func validateURL(u string) error { + parsed, err := url.Parse(u) + if err != nil { + return fmt.Errorf("unable to parse URL, err: %s", err.Error()) + } + + if parsed.Scheme == "" { + return fmt.Errorf("URL missing valid scheme, %s", u) + } + + q := parsed.Query() + for _, p := range illegalQueryParms { + if _, ok := q[p]; ok { + return fmt.Errorf("%s cannot be a query parameter for a signed URL", p) + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 000000000000..cc07c4c553bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,6311 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3 provides a client for Amazon Simple Storage Service. +package s3 + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. 
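Every operation in this generated S3 file follows the same two-step shape: XxxRequest builds a *request.Request plus an output value, and the plain Xxx wrapper simply calls Send. A minimal sketch of that pattern, assuming an *s3.S3 client named svc and made-up bucket/key/upload values:

// Build the request explicitly (useful for attaching extra handlers), then send it.
req, out := svc.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
	Bucket:   aws.String("example-bucket"), // hypothetical values
	Key:      aws.String("example-key"),
	UploadId: aws.String("example-upload-id"),
})
if err := req.Send(); err != nil {
	log.Fatalf("abort failed: %v", err)
}
_ = out // *s3.AbortMultipartUploadOutput, populated after a successful Send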
+func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteMultipartUploadOutput{} + req.Data = output + return +} + +// Completes a multipart upload by assembling previously uploaded parts. +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a request for the CopyObject operation. +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyObjectOutput{} + req.Data = output + return +} + +// Creates a copy of an object that is already stored in Amazon S3. +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + err := req.Send() + return out, err +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a request for the CreateBucket operation. +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBucketOutput{} + req.Data = output + return +} + +// Creates a new bucket. +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + err := req.Send() + return out, err +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation. +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateMultipartUploadOutput{} + req.Data = output + return +} + +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a request for the DeleteBucket operation. 
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketOutput{} + req.Data = output + return +} + +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a request for the DeleteBucketCors operation. +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketCorsOutput{} + req.Data = output + return +} + +// Deletes the cors configuration information set for the bucket. +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation. +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketLifecycleOutput{} + req.Data = output + return +} + +// Deletes the lifecycle configuration from the bucket. +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation. +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketPolicyOutput{} + req.Data = output + return +} + +// Deletes the policy from the bucket. 
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation. +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation. +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketTaggingOutput{} + req.Data = output + return +} + +// Deletes the tags from the bucket. +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation. +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketWebsiteOutput{} + req.Data = output + return +} + +// This operation removes the website configuration from the bucket. +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a request for the DeleteObject operation. 
+func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectOutput{} + req.Data = output + return +} + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a request for the DeleteObjects operation. +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectsOutput{} + req.Data = output + return +} + +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a request for the GetBucketAcl operation. +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAclOutput{} + req.Data = output + return +} + +// Gets the access control policy for the bucket. +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a request for the GetBucketCors operation. +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketCorsOutput{} + req.Data = output + return +} + +// Returns the cors configuration for the bucket. +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation. 
+func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a request for the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Returns the lifecycle configuration information set on the bucket. +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a request for the GetBucketLocation operation. +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLocationOutput{} + req.Data = output + return +} + +// Returns the region the bucket resides in. +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a request for the GetBucketLogging operation. +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLoggingOutput{} + req.Data = output + return +} + +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a request for the GetBucketNotification operation. 
+func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfigurationDeprecated{} + req.Data = output + return +} + +// Deprecated, see the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfiguration{} + req.Data = output + return +} + +// Returns the notification configuration of a bucket. +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation. +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketPolicyOutput{} + req.Data = output + return +} + +// Returns the policy of a specified bucket. +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a request for the GetBucketReplication operation. +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation. 
+func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Returns the request payment configuration of a bucket. +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a request for the GetBucketTagging operation. +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketTaggingOutput{} + req.Data = output + return +} + +// Returns the tag set associated with the bucket. +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation. +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketVersioningOutput{} + req.Data = output + return +} + +// Returns the versioning state of a bucket. +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation. +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketWebsiteOutput{} + req.Data = output + return +} + +// Returns the website configuration for a bucket. +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a request for the GetObject operation. 
+func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectOutput{} + req.Data = output + return +} + +// Retrieves objects from Amazon S3. +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a request for the GetObjectAcl operation. +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectAclOutput{} + req.Data = output + return +} + +// Returns the access control list (ACL) of an object. +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation. +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectTorrentOutput{} + req.Data = output + return +} + +// Return torrent files from a bucket. +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + err := req.Send() + return out, err +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a request for the HeadBucket operation. +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &HeadBucketOutput{} + req.Data = output + return +} + +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + err := req.Send() + return out, err +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a request for the HeadObject operation. +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &HeadObjectOutput{} + req.Data = output + return +} + +// The HEAD operation retrieves metadata from an object without returning the +// object itself. 
This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + err := req.Send() + return out, err +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a request for the ListBuckets operation. +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBucketsOutput{} + req.Data = output + return +} + +// Returns a list of all buckets owned by the authenticated sender of the request. +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + err := req.Send() + return out, err +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMultipartUploadsOutput{} + req.Data = output + return +} + +// This operation lists in-progress multipart uploads. +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMultipartUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMultipartUploadsOutput), lastPage) + }) +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a request for the ListObjectVersions operation. +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectVersionsOutput{} + req.Data = output + return +} + +// Returns metadata about all of the versions of objects in a bucket. 
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectVersionsOutput), lastPage) + }) +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a request for the ListObjects operation. +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsOutput{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsOutput), lastPage) + }) +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a request for the ListParts operation. +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPartsOutput{} + req.Data = output + return +} + +// Lists the parts that have been uploaded for a specific multipart upload. +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a request for the PutBucketAcl operation. 
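The ListXxxPages helpers above wrap the Paginator metadata declared on each operation and invoke a callback once per page. A short sketch of paging through a bucket listing, again assuming an *s3.S3 client named svc and a made-up bucket name:

err := svc.ListObjectsPages(&s3.ListObjectsInput{Bucket: aws.String("example-bucket")},
	func(page *s3.ListObjectsOutput, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(*obj.Key) // each page holds up to MaxKeys objects
		}
		return true // returning false would stop the pagination early
	})
if err != nil {
	log.Fatalf("list failed: %v", err)
}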
+func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketAclOutput{} + req.Data = output + return +} + +// Sets the permissions on a bucket using access control lists (ACL). +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a request for the PutBucketCors operation. +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketCorsOutput{} + req.Data = output + return +} + +// Sets the cors configuration for a bucket. +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation. +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketLifecycleConfiguration operation. +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a request for the PutBucketLifecycleConfiguration operation. +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. 
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a request for the PutBucketLogging operation. +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketLoggingOutput{} + req.Data = output + return +} + +// Set the logging parameters for a bucket and specify permissions for who +// can view and modify the logging parameters. To set the logging status of +// a bucket, you must be the bucket owner. +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a request for the PutBucketNotification operation. +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketNotificationOutput{} + req.Data = output + return +} + +// Deprecated, see the PutBucketNotificationConfiguration operation. +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation. +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketNotificationConfigurationOutput{} + req.Data = output + return +} + +// Enables notifications of specified events for a bucket.
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation. +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketPolicyOutput{} + req.Data = output + return +} + +// Replaces a policy on a bucket. If the bucket already has a policy, the one +// in this request completely replaces it. +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a request for the PutBucketReplication operation. +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketReplicationOutput{} + req.Data = output + return +} + +// Creates a new replication configuration (or replaces an existing one, if +// present). +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation. +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. 
Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a request for the PutBucketTagging operation. +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketTaggingOutput{} + req.Data = output + return +} + +// Sets the tags for a bucket. +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation. +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketVersioningOutput{} + req.Data = output + return +} + +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation. +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketWebsiteOutput{} + req.Data = output + return +} + +// Set the website configuration for a bucket. +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a request for the PutObject operation. 
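// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial aside, not part of the vendored file).
// Like the other bucket-level PUT operations above, PutBucketVersioning
// discards the response body and only reports success or failure. The
// VersioningConfiguration input field and its "Enabled" Status value are
// assumptions drawn from the generated types and are not shown in this hunk.
package s3example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func enableVersioningSketch(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String(bucket),
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String("Enabled"),
		},
	})
	return err
}
// ---------------------------------------------------------------------------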
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &PutObjectOutput{} + req.Data = output + return +} + +// Adds an object to a bucket. +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + err := req.Send() + return out, err +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a request for the PutObjectAcl operation. +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &PutObjectAclOutput{} + req.Data = output + return +} + +// uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a request for the RestoreObject operation. +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &RestoreObjectOutput{} + req.Data = output + return +} + +// Restores an archived copy of an object back into Amazon S3 +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + err := req.Send() + return out, err +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a request for the UploadPart operation. +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadPartOutput{} + req.Data = output + return +} + +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + err := req.Send() + return out, err +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a request for the UploadPartCopy operation. 
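// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial aside, not part of the vendored file).
// The UploadPart comment above stresses that an initiated multipart upload
// keeps accruing part-storage charges until it is either completed or
// aborted, so a typical flow pairs CreateMultipartUpload / UploadPart /
// CompleteMultipartUpload with an AbortMultipartUpload fallback. The input
// and output structs referenced here are defined elsewhere in this file; the
// Body and PartNumber fields on UploadPartInput and the ETag on its output
// are assumptions not shown in this hunk.
package s3example

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func multipartSketch(svc *s3.S3, bucket, key string, part []byte) error {
	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}

	up, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		UploadId:   create.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader(part),
	})
	if err != nil {
		// Abort so S3 frees the stored parts and stops charging for them.
		svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
			Bucket:   aws.String(bucket),
			Key:      aws.String(key),
			UploadId: create.UploadId,
		})
		return err
	}

	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: up.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	return err
}
// ---------------------------------------------------------------------------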
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + req = c.newRequest(op, input, output) + output = &UploadPartCopyOutput{} + req.Data = output + return +} + +// Uploads a part by copying data from an existing object as data source. +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + err := req.Send() + return out, err +} + +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. 
+ Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied. + KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +type CopyObjectInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. 
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata provided in the request. 
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. 
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. 
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. 
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. 
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +type Delete struct { + _ struct{} `type:"structure"` + + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. 
+ Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return 
awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +type Destination struct { + _ struct{} `type:"structure"` + + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + Bucket *string `type:"string" required:"true"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Container for key value pair that defines the criteria for the filter rule. +type FilterRule struct { + _ struct{} `type:"structure"` + + // Object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. + // Overlapping prefixes and suffixes are not supported. For more information, + // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ Name *string `type:"string" enum:"FilterRuleName"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s 
GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +type GetBucketLocationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `type:"structure"` + + // Name of the bucket to get the notification configuration for. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +type GetBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document.
+ Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
+ Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. 
+ Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +type GetObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). 
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Sets the Cache-Control header of the response. + ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. 
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +type HeadBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). 
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectInput) GoString() string { + return s.String() +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// Container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// Container for specifying the AWS Lambda notification configuration. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +type LifecycleRule struct { + _ struct{} `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. 
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. 
+ EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. 
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +type ListObjectsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Contents []*Object `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + Marker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Amazon S3 lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. + NextMarker *string `type:"string"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +type ListPartsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Identifies who initiated the multipart upload. 
+ Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. 
You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
+type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. 
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + Bucket *string `location:"uri" 
locationName:"Bucket" type:"string" required:"true"` + + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. 
+ Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ 
struct{} `type:"structure" payload:"VersioningConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. 
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectInput) GoString() string { + return s.String() +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// Container for specifying a configuration when you want Amazon S3 to publish +// events to an Amazon Simple Queue Service (Amazon SQS) queue. +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"` + + Queue *string `type:"string"` +} + +// String returns the string representation +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +type Redirect struct { + _ struct{} `type:"structure"` + + // The host name to use in the redirect request. + HostName *string `type:"string"` + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string `type:"string"` + + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` + + // The object key prefix to use in the redirect request. For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required + // if one of the siblings is present. Can be present only if ReplaceKeyWith + // is not provided. + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Redirect) GoString() string { + return s.String() +} + +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests will be redirected. + HostName *string `type:"string" required:"true"` + + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + Role *string `type:"string" required:"true"` + + // Container for information about a particular replication rule. Replication + // configuration must have at least one rule and can contain up to 1,000 rules. + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +type ReplicationRule struct { + _ struct{} `type:"structure"` + + Destination *Destination `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"` + + // Object keyname prefix identifying one or more objects to which the rule applies. + // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes + // are not supported. + Prefix *string `type:"string" required:"true"` + + // The rule is ignored if status is not Enabled. + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. 
If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +type Rule struct { + _ struct{} `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA or GLACIER storage class. If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA + // or GLACIER storage class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +type Tagging struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Logging permissions assigned to the Grantee for the bucket.
+ Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Container for specifying the configuration when you want Amazon S3 to publish +// events to an Amazon Simple Notification Service (Amazon SNS) topic. +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +type UploadPartCopyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. 
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. You can copy a range only if the source object + // is greater than 5 GB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key.
The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +type UploadPartInput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadSeeker `type:"blob"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. 
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being uploaded. This is a positive integer between 1 + // and 10,000. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +const ( + // @enum BucketCannedACL + BucketCannedACLPrivate = "private" + // @enum BucketCannedACL + BucketCannedACLPublicRead = "public-read" + // @enum BucketCannedACL + BucketCannedACLPublicReadWrite = "public-read-write" + // @enum BucketCannedACL + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +const ( + // @enum BucketLocationConstraint + BucketLocationConstraintEu = "EU" + // @enum BucketLocationConstraint + BucketLocationConstraintEuWest1 = "eu-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest1 = "us-west-1" + // @enum BucketLocationConstraint + BucketLocationConstraintUsWest2 = "us-west-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + // @enum BucketLocationConstraint + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + // @enum BucketLocationConstraint + BucketLocationConstraintSaEast1 = "sa-east-1" + // @enum BucketLocationConstraint + BucketLocationConstraintCnNorth1 = "cn-north-1" + // @enum BucketLocationConstraint + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +const ( + // @enum BucketLogsPermission + BucketLogsPermissionFullControl = "FULL_CONTROL" + // @enum BucketLogsPermission + BucketLogsPermissionRead = "READ" + // @enum BucketLogsPermission + BucketLogsPermissionWrite = "WRITE" +) + +const ( + // @enum BucketVersioningStatus + BucketVersioningStatusEnabled = "Enabled" + // @enum BucketVersioningStatus + BucketVersioningStatusSuspended = "Suspended" +) 
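As a minimal usage sketch, assuming a configured session with default credentials and a hypothetical bucket name, the VersioningConfiguration type and the BucketVersioningStatus constants defined above are combined with PutBucketVersioningInput roughly as follows:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Assumed setup: credentials come from the environment or shared config;
	// the region here is chosen only for illustration.
	svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Enable versioning on a hypothetical bucket, using the enum constant
	// declared above instead of a raw string literal.
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	if err != nil {
		fmt.Println("PutBucketVersioning failed:", err)
	}
}

The same pattern applies to the other Put*Input types in this file: required fields such as Bucket are *string pointers set via aws.String, and enum-typed fields take one of the string constants declared in the surrounding const blocks.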
+ +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // @enum EncodingType + EncodingTypeUrl = "url" +) + +// Bucket event for which to send notifications. +const ( + // @enum Event + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + // @enum Event + EventS3ObjectCreated = "s3:ObjectCreated:*" + // @enum Event + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + // @enum Event + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + // @enum Event + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + // @enum Event + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + // @enum Event + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + // @enum Event + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + // @enum Event + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" +) + +const ( + // @enum ExpirationStatus + ExpirationStatusEnabled = "Enabled" + // @enum ExpirationStatus + ExpirationStatusDisabled = "Disabled" +) + +const ( + // @enum FilterRuleName + FilterRuleNamePrefix = "prefix" + // @enum FilterRuleName + FilterRuleNameSuffix = "suffix" +) + +const ( + // @enum MFADelete + MFADeleteEnabled = "Enabled" + // @enum MFADelete + MFADeleteDisabled = "Disabled" +) + +const ( + // @enum MFADeleteStatus + MFADeleteStatusEnabled = "Enabled" + // @enum MFADeleteStatus + MFADeleteStatusDisabled = "Disabled" +) + +const ( + // @enum MetadataDirective + MetadataDirectiveCopy = "COPY" + // @enum MetadataDirective + MetadataDirectiveReplace = "REPLACE" +) + +const ( + // @enum ObjectCannedACL + ObjectCannedACLPrivate = "private" + // @enum ObjectCannedACL + ObjectCannedACLPublicRead = "public-read" + // @enum ObjectCannedACL + ObjectCannedACLPublicReadWrite = "public-read-write" + // @enum ObjectCannedACL + ObjectCannedACLAuthenticatedRead = "authenticated-read" + // @enum ObjectCannedACL + ObjectCannedACLAwsExecRead = "aws-exec-read" + // @enum ObjectCannedACL + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + // @enum ObjectCannedACL + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + // @enum ObjectStorageClass + ObjectStorageClassStandard = "STANDARD" + // @enum ObjectStorageClass + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum ObjectStorageClass + ObjectStorageClassGlacier = "GLACIER" +) + +const ( + // @enum ObjectVersionStorageClass + ObjectVersionStorageClassStandard = "STANDARD" +) + +const ( + // @enum Payer + PayerRequester = "Requester" + // @enum Payer + PayerBucketOwner = "BucketOwner" +) + +const ( + // @enum Permission + PermissionFullControl = "FULL_CONTROL" + // @enum Permission + PermissionWrite = "WRITE" + // @enum Permission + PermissionWriteAcp = "WRITE_ACP" + // @enum Permission + PermissionRead = "READ" + // @enum Permission + PermissionReadAcp = "READ_ACP" +) + +const ( + // @enum Protocol + ProtocolHttp = "http" + // @enum Protocol + ProtocolHttps = "https" +) + +const ( + // @enum ReplicationRuleStatus + ReplicationRuleStatusEnabled = "Enabled" + // @enum ReplicationRuleStatus + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // @enum 
ReplicationStatus + ReplicationStatusComplete = "COMPLETE" + // @enum ReplicationStatus + ReplicationStatusPending = "PENDING" + // @enum ReplicationStatus + ReplicationStatusFailed = "FAILED" + // @enum ReplicationStatus + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // @enum RequestCharged + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. +// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // @enum RequestPayer + RequestPayerRequester = "requester" +) + +const ( + // @enum ServerSideEncryption + ServerSideEncryptionAes256 = "AES256" + // @enum ServerSideEncryption + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // @enum StorageClass + StorageClassStandard = "STANDARD" + // @enum StorageClass + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + // @enum StorageClass + StorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum TransitionStorageClass + TransitionStorageClassGlacier = "GLACIER" + // @enum TransitionStorageClass + TransitionStorageClassStandardIa = "STANDARD_IA" +) + +const ( + // @enum Type + TypeCanonicalUser = "CanonicalUser" + // @enum Type + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + // @enum Type + TypeGroup = "Group" +) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 000000000000..c3a2702dad4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = &loc + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go new file mode 100644 index 000000000000..9fc5df94d335 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go @@ -0,0 
+1,36 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. + _, err := io.Copy(h, r.Body) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to read body", err) + return + } + _, err = r.Body.Seek(0, 0) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to seek body", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + sum := h.Sum(nil) + sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(sum64, sum) + r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 000000000000..c227f242c15e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,37 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = func(c *client.Client) { + // Support building custom host-style bucket endpoints + c.Handlers.Build.PushFront(updateHostWithBucket) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeys) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + } + + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration: + // These S3 operations require Content-MD5 to be set + r.Handlers.Build.PushBack(contentMD5) + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 000000000000..b7cf4f9cc30c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,60 @@ +package s3 + +import ( + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS 
compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// hostStyleBucketName returns true if the request should put the bucket in +// the host. This is false if S3ForcePathStyle is explicitly set or if the +// bucket is not DNS compatible. +func hostStyleBucketName(r *request.Request, bucket string) bool { + if aws.BoolValue(r.Config.S3ForcePathStyle) { + return false + } + + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // GetBucketLocation should be able to be called from any region within + // a partition, and return the associated region of the bucket. + if r.Operation.Name == opGetBucketLocation { + return false + } + + // Use host-style if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +func updateHostWithBucket(r *request.Request) { + b, _ := awsutil.ValuesAtPath(r.Params, "Bucket") + if len(b) == 0 { + return + } + + if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) { + r.HTTPRequest.URL.Host = *bucket + "." + r.HTTPRequest.URL.Host + r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1) + if r.HTTPRequest.URL.Path == "" { + r.HTTPRequest.URL.Path = "/" + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go new file mode 100644 index 000000000000..cf01da535019 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -0,0 +1,86 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// S3 is a client for Amazon S3. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "s3" + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a S3 client from just a session. +// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. 
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 000000000000..268ea2fb459e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,44 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme != "https" { + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + if len(p) > 0 { + r.Error = errSSERequiresSSL + } + } +} + +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", + } + + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 000000000000..ce65fcdaf6cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to read response body", err) + return + } + body := 
bytes.NewReader(b) + r.HTTPResponse.Body = aws.ReadSeekCloser(body) + defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0) + + if body.Len() == 0 { + // If there is no body don't attempt to parse the body. + return + } + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == "SerializationError" { + r.Error = nil + return + } + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 000000000000..cd6004825fe2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,57 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + if r.HTTPResponse.ContentLength == 0 { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go new file mode 100644 index 000000000000..4879fca8a0cd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -0,0 +1,117 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package s3 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 403, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt new file mode 100644 index 000000000000..918297133253 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015-2015 Li Yi (denverdino@gmail.com). + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go new file mode 100644 index 000000000000..4ed0a06f2de6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go @@ -0,0 +1,145 @@ +package common + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "log" + "net/http" + "net/url" + "time" + + "github.com/denverdino/aliyungo/util" +) + +// A Client represents a client of ECS services +type Client struct { + AccessKeyId string //Access Key Id + AccessKeySecret string //Access Key Secret + debug bool + httpClient *http.Client + endpoint string + version string +} + +// NewClient creates a new instance of ECS client +func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) { + client.AccessKeyId = accessKeyId + client.AccessKeySecret = accessKeySecret + "&" + client.debug = false + client.httpClient = &http.Client{} + client.endpoint = endpoint + client.version = version +} + +// SetEndpoint sets custom endpoint +func (client *Client) SetEndpoint(endpoint string) { + client.endpoint = endpoint +} + +// SetEndpoint sets custom version +func (client *Client) SetVersion(version string) { + client.version = version +} + +// SetAccessKeyId sets new AccessKeyId +func (client *Client) SetAccessKeyId(id string) { + client.AccessKeyId = id +} + +// SetAccessKeySecret sets new AccessKeySecret +func (client *Client) SetAccessKeySecret(secret string) { + client.AccessKeySecret = secret + "&" +} + +// SetDebug sets debug mode to log the request/response message +func (client *Client) SetDebug(debug bool) { + client.debug = debug +} + +// Invoke sends the raw HTTP request for ECS services +func (client *Client) Invoke(action string, args interface{}, response interface{}) error { + + request := Request{} + request.init(client.version, action, client.AccessKeyId) + + query := util.ConvertToQueryValues(request) + util.SetQueryValues(args, &query) + + // Sign request + signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret) + + // Generate the request URL + requestURL := client.endpoint + "?" 
+ query.Encode() + "&Signature=" + url.QueryEscape(signature) + + httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) + + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version) + + if err != nil { + return GetClientError(err) + } + + t0 := time.Now() + httpResp, err := client.httpClient.Do(httpReq) + t1 := time.Now() + if err != nil { + return GetClientError(err) + } + statusCode := httpResp.StatusCode + + if client.debug { + log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0)) + } + + defer httpResp.Body.Close() + body, err := ioutil.ReadAll(httpResp.Body) + + if err != nil { + return GetClientError(err) + } + + if client.debug { + var prettyJSON bytes.Buffer + err = json.Indent(&prettyJSON, body, "", " ") + log.Println(string(prettyJSON.Bytes())) + } + + if statusCode >= 400 && statusCode <= 599 { + errorResponse := ErrorResponse{} + err = json.Unmarshal(body, &errorResponse) + ecsError := &Error{ + ErrorResponse: errorResponse, + StatusCode: statusCode, + } + return ecsError + } + + err = json.Unmarshal(body, response) + //log.Printf("%++v", response) + if err != nil { + return GetClientError(err) + } + + return nil +} + +// GenerateClientToken generates the Client Token with random string +func (client *Client) GenerateClientToken() string { + return util.CreateRandomString() +} + +func GetClientErrorFromString(str string) error { + return &Error{ + ErrorResponse: ErrorResponse{ + Code: "AliyunGoClientFailure", + Message: str, + }, + StatusCode: -1, + } +} + +func GetClientError(err error) error { + return GetClientErrorFromString(err.Error()) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go new file mode 100644 index 000000000000..db36f49b2123 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go @@ -0,0 +1,18 @@ +package common + +// Region represents ECS region +type Region string + +// Constants of region definition +const ( + Hangzhou = Region("cn-hangzhou") + Qingdao = Region("cn-qingdao") + Beijing = Region("cn-beijing") + Hongkong = Region("cn-hongkong") + Shenzhen = Region("cn-shenzhen") + USWest1 = Region("us-west-1") + APSouthEast1 = Region("ap-southeast-1") + Shanghai = Region("cn-shanghai") +) + +var ValidRegions = []Region{Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, USWest1, APSouthEast1} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go new file mode 100644 index 000000000000..2a883f19b204 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go @@ -0,0 +1,101 @@ +package common + +import ( + "fmt" + "log" + "time" + + "github.com/denverdino/aliyungo/util" +) + +// Constants for Aliyun API requests +const ( + SignatureVersion = "1.0" + SignatureMethod = "HMAC-SHA1" + JSONResponseFormat = "JSON" + XMLResponseFormat = "XML" + ECSRequestMethod = "GET" +) + +type Request struct { + Format string + Version string + AccessKeyId string + Signature string + SignatureMethod string + Timestamp util.ISO6801Time + SignatureVersion string + SignatureNonce string + 
ResourceOwnerAccount string + Action string +} + +func (request *Request) init(version string, action string, AccessKeyId string) { + request.Format = JSONResponseFormat + request.Timestamp = util.NewISO6801Time(time.Now().UTC()) + request.Version = version + request.SignatureVersion = SignatureVersion + request.SignatureMethod = SignatureMethod + request.SignatureNonce = util.CreateRandomString() + request.Action = action + request.AccessKeyId = AccessKeyId +} + +type Response struct { + RequestId string +} + +type ErrorResponse struct { + Response + HostId string + Code string + Message string +} + +// An Error represents a custom error for Aliyun API failure response +type Error struct { + ErrorResponse + StatusCode int //Status Code of HTTP Response +} + +func (e *Error) Error() string { + return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) +} + +type Pagination struct { + PageNumber int + PageSize int +} + +func (p *Pagination) SetPageSize(size int) { + p.PageSize = size +} + +func (p *Pagination) Validate() { + if p.PageNumber < 0 { + log.Printf("Invalid PageNumber: %d", p.PageNumber) + p.PageNumber = 1 + } + if p.PageSize < 0 { + log.Printf("Invalid PageSize: %d", p.PageSize) + p.PageSize = 10 + } else if p.PageSize > 50 { + log.Printf("Invalid PageSize: %d", p.PageSize) + p.PageSize = 50 + } +} + +// A PaginationResponse represents a response with pagination information +type PaginationResult struct { + TotalCount int + PageNumber int + PageSize int +} + +// NextPage gets the next page of the result set +func (r *PaginationResult) NextPage() *Pagination { + if r.PageNumber*r.PageSize >= r.TotalCount { + return nil + } + return &Pagination{PageNumber: r.PageNumber + 1, PageSize: r.PageSize} +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go new file mode 100644 index 000000000000..c9f1dc8c1636 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go @@ -0,0 +1,8 @@ +package common + +type InternetChargeType string + +const ( + PayByBandwidth = InternetChargeType("PayByBandwidth") + PayByTraffic = InternetChargeType("PayByTraffic") +) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go new file mode 100644 index 000000000000..7cb3d3aff054 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go @@ -0,0 +1,3 @@ +package common + +const Version = "0.1" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go index 17b2d3ce7799..ac3e8f029860 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go @@ -8,7 +8,6 @@ import ( "encoding/base64" "encoding/xml" "fmt" - "github.com/denverdino/aliyungo/util" "io" "io/ioutil" "log" @@ -22,6 +21,9 @@ import ( "strconv" "strings" "time" 
+ + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" ) const DefaultContentType = "application/octet-stream" @@ -34,7 +36,6 @@ type Client struct { Internal bool Secure bool ConnectTimeout time.Duration - ReadTimeout time.Duration endpoint string debug bool @@ -168,6 +169,8 @@ func (client *Client) SetEndpoint(endpoint string) { } // PutBucket creates a new bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucket func (b *Bucket) PutBucket(perm ACL) error { headers := make(http.Header) if perm != "" { @@ -185,13 +188,16 @@ func (b *Bucket) PutBucket(perm ACL) error { // DelBucket removes an existing bucket. All objects in the bucket must // be removed before the bucket itself can be removed. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&DeleteBucket func (b *Bucket) DelBucket() (err error) { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - } for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: "/", + } + err = b.Client.query(req, nil) if !shouldRetry(err) { break @@ -201,6 +207,8 @@ func (b *Bucket) DelBucket() (err error) { } // Get retrieves an object from an bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&GetObject func (b *Bucket) Get(path string) (data []byte, err error) { body, err := b.GetReader(path) if err != nil { @@ -237,16 +245,17 @@ func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { // It is the caller's responsibility to call Close on rc when // finished reading func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) { - req := &request{ - bucket: b.Name, - path: path, - headers: headers, - } - err = b.Client.prepare(req) - if err != nil { - return nil, err - } for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + bucket: b.Name, + path: path, + headers: headers, + } + err = b.Client.prepare(req) + if err != nil { + return nil, err + } + resp, err := b.Client.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue @@ -271,17 +280,18 @@ func (b *Bucket) GetWithParams(path string, params url.Values) (data []byte, err } func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) { - req := &request{ - bucket: b.Name, - path: path, - params: params, - headers: headers, - } - err = b.Client.prepare(req) - if err != nil { - return nil, err - } for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + bucket: b.Name, + path: path, + params: params, + headers: headers, + } + err = b.Client.prepare(req) + if err != nil { + return nil, err + } + resp, err := b.Client.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue @@ -296,16 +306,17 @@ func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, // Exists checks whether or not an object exists on an bucket using a HEAD request. 
func (b *Bucket) Exists(path string) (exists bool, err error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - } - err = b.Client.prepare(req) - if err != nil { - return - } for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + } + err = b.Client.prepare(req) + if err != nil { + return + } + resp, err := b.Client.run(req, nil) if shouldRetry(err) && attempt.HasNext() { @@ -332,19 +343,22 @@ func (b *Bucket) Exists(path string) (exists bool, err error) { } // Head HEADs an object in the bucket, returns the response with +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&HeadObject func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - headers: headers, - } - err := b.Client.prepare(req) - if err != nil { - return nil, err - } for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + headers: headers, + } + err := b.Client.prepare(req) + if err != nil { + return nil, err + } + resp, err := b.Client.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue @@ -352,18 +366,25 @@ func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) if err != nil { return nil, err } + if resp != nil && resp.Body != nil { + resp.Body.Close() + } return resp, err } return nil, fmt.Errorf("OSS Currently Unreachable") } // Put inserts an object into the bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&PutObject func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { body := bytes.NewBuffer(data) return b.PutReader(path, body, int64(len(data)), contType, perm, options) } // PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&CopyObject func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { headers := make(http.Header) @@ -376,6 +397,7 @@ func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source stri bucket: b.Name, path: path, headers: headers, + timeout: 5 * time.Minute, } resp := &CopyObjectResult{} err := b.Client.query(req, resp) @@ -418,8 +440,8 @@ func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options) } stats, err := file.Stat() if err != nil { - log.Panicf("Unable to read file %s stats.", file.Name()) - return nil + log.Printf("Unable to read file %s stats.\n", file.Name()) + return err } return b.PutReader(path, file, stats.Size(), contentType, perm, options) @@ -502,6 +524,8 @@ type WebsiteConfiguration struct { } // PutBucketWebsite configures a bucket as a website. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucketWebsite func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { doc, err := xml.Marshal(configuration) if err != nil { @@ -530,6 +554,8 @@ func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length in } // Del removes an object from the bucket. 
+// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteObject func (b *Bucket) Del(path string) error { req := &request{ method: "DELETE", @@ -550,6 +576,8 @@ type Object struct { } // DelMulti removes up to 1000 objects from the bucket. +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteMultipleObjects func (b *Bucket) DelMulti(objects Delete) error { doc, err := xml.Marshal(objects) if err != nil { @@ -627,7 +655,7 @@ type Key struct { // will return keys alphabetically greater than the marker. // // The max parameter specifies how many keys + common prefixes to return in -// the response. The default is 1000. +// the response, at most 1000. The default is 100. // // For example, given these keys in a bucket: // @@ -667,6 +695,8 @@ type Key struct { // }, // } // +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucket func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { params := make(url.Values) params.Set("prefix", prefix) @@ -675,12 +705,12 @@ func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, if max != 0 { params.Set("max-keys", strconv.FormatInt(int64(max), 10)) } - req := &request{ - bucket: b.Name, - params: params, - } result = &ListResp{} for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + bucket: b.Name, + params: params, + } err = b.Client.query(req, result) if !shouldRetry(err) { break @@ -700,66 +730,6 @@ func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, return result, nil } -//// The VersionsResp type holds the results of a list bucket Versions operation. -//type VersionsResp struct { -// Name string -// Prefix string -// KeyMarker string -// VersionIdMarker string -// MaxKeys int -// Delimiter string -// IsTruncated bool -// Versions []Version `xml:"Version"` -// CommonPrefixes []string `xml:">Prefix"` -//} - -//// The Version type represents an object version stored in an bucket. -//type Version struct { -// Key string -// VersionId string -// IsLatest bool -// LastModified string -// // ETag gives the hex-encoded MD5 sum of the contents, -// // surrounded with double-quotes. 
-// ETag string -// Size int64 -// Owner Owner -// StorageClass string -//} - -//func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) { -// params := url.Values{} -// params.Set("versions", "") -// params.Set("prefix", prefix) -// params.Set("delimiter", delim) - -// if len(versionIdMarker) != 0 { -// params["version-id-marker"] = []string{versionIdMarker} -// } -// if len(keyMarker) != 0 { -// params["key-marker"] = []string{keyMarker} -// } - -// if max != 0 { -// params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} -// } -// req := &request{ -// bucket: b.Name, -// params: params, -// } -// result = &VersionsResp{} -// for attempt := attempts.Start(); attempt.Next(); { -// err = b.Client.query(req, result) -// if !shouldRetry(err) { -// break -// } -// } -// if err != nil { -// return nil, err -// } -// return result, nil -//} - type GetLocationResp struct { Location string `xml:",innerxml"` } @@ -942,6 +912,7 @@ type request struct { baseurl string payload io.Reader prepared bool + timeout time.Duration } func (req *request) url() (*url.URL, error) { @@ -1062,6 +1033,8 @@ func (client *Client) setupHttpRequest(req *request) (*http.Request, error) { Form: req.params, } + hreq.Header.Set("X-SDK-Client", `AliyunGO/`+common.Version) + contentLength := req.headers.Get("Content-Length") if contentLength != "" { @@ -1079,28 +1052,11 @@ func (client *Client) setupHttpRequest(req *request) (*http.Request, error) { // doHttpRequest sends hreq and returns the http response from the server. // If resp is not nil, the XML data contained in the response // body will be unmarshalled on it. -func (client *Client) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) { - c := http.Client{ - Transport: &http.Transport{ - Dial: func(netw, addr string) (c net.Conn, err error) { - deadline := time.Now().Add(client.ReadTimeout) - if client.ConnectTimeout > 0 { - c, err = net.DialTimeout(netw, addr, client.ConnectTimeout) - } else { - c, err = net.Dial(netw, addr) - } - if err != nil { - return - } - if client.ReadTimeout > 0 { - err = c.SetDeadline(deadline) - } - return - }, - Proxy: http.ProxyFromEnvironment, - }, - } +func (client *Client) doHttpRequest(c *http.Client, hreq *http.Request, resp interface{}) (*http.Response, error) { + if true { + log.Printf("%s %s ...\n", hreq.Method, hreq.URL.String()) + } hresp, err := c.Do(hreq) if err != nil { return nil, err @@ -1110,7 +1066,7 @@ func (client *Client) doHttpRequest(hreq *http.Request, resp interface{}) (*http contentType := hresp.Header.Get("Content-Type") if contentType == "application/xml" || contentType == "text/xml" { dump, _ := httputil.DumpResponse(hresp, true) - log.Printf("} -> %s\n", dump) + log.Printf("%s\n", dump) } else { log.Printf("Response Content-Type: %s\n", contentType) } @@ -1143,7 +1099,25 @@ func (client *Client) run(req *request, resp interface{}) (*http.Response, error return nil, err } - return client.doHttpRequest(hreq, resp) + c := &http.Client{ + Transport: &http.Transport{ + Dial: func(netw, addr string) (c net.Conn, err error) { + if client.ConnectTimeout > 0 { + c, err = net.DialTimeout(netw, addr, client.ConnectTimeout) + } else { + c, err = net.Dial(netw, addr) + } + if err != nil { + return + } + return + }, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: req.timeout, + } + + return client.doHttpRequest(c, hreq, resp) } // Error represents an error in an operation with OSS. 
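The run() change in the hunks above replaces the old per-connection read deadline with an http.Client built per request: ConnectTimeout is wired into the dialer and the new request-level timeout field into http.Client.Timeout. The following standalone sketch mirrors that pattern using only the standard library; it is illustrative, not part of the vendored diff, and the helper name is hypothetical.

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// newHTTPClient builds a client whose dialer honors a connect timeout and
// whose Timeout bounds the whole request, instead of setting a read deadline
// on the raw connection as the removed code did.
func newHTTPClient(connectTimeout, requestTimeout time.Duration) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Dial: func(netw, addr string) (net.Conn, error) {
				if connectTimeout > 0 {
					return net.DialTimeout(netw, addr, connectTimeout)
				}
				return net.Dial(netw, addr)
			},
			Proxy: http.ProxyFromEnvironment,
		},
		// A zero Timeout means no overall limit, matching requests that
		// leave the per-request timeout unset.
		Timeout: requestTimeout,
	}
}

func main() {
	c := newHTTPClient(5*time.Second, 5*time.Minute)
	resp, err := c.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```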
@@ -1157,7 +1131,7 @@ type Error struct { } func (e *Error) Error() string { - return e.Message + return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) } func (client *Client) buildError(r *http.Response) error { @@ -1186,10 +1160,21 @@ func (client *Client) buildError(r *http.Response) error { return &err } +type TimeoutError interface { + error + Timeout() bool // Is the error a timeout? +} + func shouldRetry(err error) bool { if err == nil { return false } + + _, ok := err.(TimeoutError) + if ok { + return true + } + switch err { case io.ErrUnexpectedEOF, io.EOF: return true @@ -1245,6 +1230,8 @@ type AccessControlPolicy struct { } // ACL returns ACL of bucket +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucketAcl func (b *Bucket) ACL() (result *AccessControlPolicy, err error) { params := make(url.Values) @@ -1263,3 +1250,75 @@ func (b *Bucket) ACL() (result *AccessControlPolicy, err error) { return &resp, nil } + +const minChunkSize = 5 << 20 +const defaultChunkSize = 2 * minChunkSize + +func (b *Bucket) GetContentLength(sourcePath string) (int64, error) { + resp, err := b.Head(sourcePath, nil) + if err != nil { + return 0, err + } + + currentLength := resp.ContentLength + + return currentLength, err +} + +// Copy large file in the same bucket +func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType string, perm ACL, options Options) error { + + log.Printf("Copy large file from %s to %s\n", sourcePath, destPath) + + currentLength, err := b.GetContentLength(sourcePath) + + if err != nil { + return err + } + + multi, err := b.InitMulti(destPath, contentType, perm, options) + if err != nil { + return err + } + + parts := []Part{} + + defer func() { + if len(parts) > 0 { + if multi == nil { + // Parts should be empty if the multi is not initialized + panic("Unreachable") + } else { + if multi.Complete(parts) != nil { + multi.Abort() + } + } + } + }() + + var start int64 = 0 + var to int64 = 0 + var partNumber = 0 + sourcePathForCopy := b.Path(sourcePath) + + for start = 0; start < currentLength; start = to { + to = start + defaultChunkSize + if to > currentLength { + to = currentLength + } + partNumber++ + + rangeStr := fmt.Sprintf("bytes=%d-%d", start, to-1) + + _, part, err := multi.PutPartCopy(partNumber, + CopyOptions{CopySourceOptions: rangeStr}, + sourcePathForCopy) + + if err != nil { + return err + } + parts = append(parts, part) + } + + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go index 5b6491ecc192..f17d75c3d315 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go @@ -8,6 +8,7 @@ import ( "encoding/xml" "errors" "io" + "time" //"log" "net/http" "net/url" @@ -106,6 +107,8 @@ func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, // InitMulti initializes a new multipart upload at the provided // key inside b and returns a value for manipulating it. 
// +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&InitiateMultipartUpload func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { headers := make(http.Header) headers.Set("Content-Length", "0") @@ -138,6 +141,8 @@ func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Option return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil } +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPartCopy func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { // TODO source format a /BUCKET/PATH/TO/OBJECT // TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path) @@ -187,15 +192,25 @@ func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObj // PutPart sends part n of the multipart upload, reading all the content from r. // Each part, except for the last one, must be at least 5MB in size. // +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPart func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { partSize, _, md5b64, err := seekerInfo(r) if err != nil { return Part{}, err } - return m.putPart(n, r, partSize, md5b64) + return m.putPart(n, r, partSize, md5b64, 0) +} + +func (m *Multi) PutPartWithTimeout(n int, r io.ReadSeeker, timeout time.Duration) (Part, error) { + partSize, _, md5b64, err := seekerInfo(r) + if err != nil { + return Part{}, err + } + return m.putPart(n, r, partSize, md5b64, timeout) } -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { +func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string, timeout time.Duration) (Part, error) { headers := make(http.Header) headers.Set("Content-Length", strconv.FormatInt(partSize, 10)) headers.Set("Content-MD5", md5b64) @@ -216,6 +231,7 @@ func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) ( headers: headers, params: params, payload: r, + timeout: timeout, } err = m.Bucket.Client.prepare(req) if err != nil { @@ -370,7 +386,7 @@ NextSection: } // Part wasn't found or doesn't match. Send it. - part, err := m.putPart(current, section, partSize, md5b64) + part, err := m.putPart(current, section, partSize, md5b64, 0) if err != nil { return nil, err } @@ -443,6 +459,8 @@ func (m *Multi) Complete(parts []Part) error { // handled internally, but it's not clear what happens precisely (Is an // error returned? Is the issue completely undetectable?). 
// +// +// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&AbortMultipartUpload func (m *Multi) Abort() error { params := make(url.Values) params.Set("uploadId", m.UploadId) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go index 2bba73827a7e..e2daf3da65ba 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go @@ -9,12 +9,15 @@ type Region string // Constants of region definition const ( - Hangzhou = Region("oss-cn-hangzhou") - Qingdao = Region("oss-cn-qingdao") - Beijing = Region("oss-cn-beijing") - Hongkong = Region("oss-cn-hongkong") - Shenzhen = Region("oss-cn-shenzhen") - USWest1 = Region("oss-us-west-1") + Hangzhou = Region("oss-cn-hangzhou") + Qingdao = Region("oss-cn-qingdao") + Beijing = Region("oss-cn-beijing") + Hongkong = Region("oss-cn-hongkong") + Shenzhen = Region("oss-cn-shenzhen") + USWest1 = Region("oss-us-west-1") + APSouthEast1 = Region("oss-ap-southeast-1") + Shanghai = Region("oss-cn-shanghai") + DefaultRegion = Hangzhou ) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go index 56b900afa53f..4413957091d0 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go @@ -23,13 +23,24 @@ func SetQueryValues(ifc interface{}, values *url.Values) { } func setQueryValues(i interface{}, values *url.Values, prefix string) { + // add to support url.Values + mapValues, ok := i.(url.Values) + if ok { + for k, _ := range mapValues { + values.Set(k, mapValues.Get(k)) + } + return + } + elem := reflect.ValueOf(i) if elem.Kind() == reflect.Ptr { elem = elem.Elem() } elemType := elem.Type() + for i := 0; i < elem.NumField(); i++ { fieldName := elemType.Field(i).Name + anonymous := elemType.Field(i).Anonymous field := elem.Field(i) // TODO Use Tag for validation // tag := typ.Field(i).Tag.Get("tagname") @@ -62,6 +73,19 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { value = strconv.FormatBool(field.Bool()) case reflect.String: value = field.String() + case reflect.Map: + ifc := field.Interface() + m := ifc.(map[string]string) + if m != nil { + j := 0 + for k, v := range m { + j++ + keyName := fmt.Sprintf("%s.%d.Key", fieldName, j) + values.Set(keyName, k) + valueName := fmt.Sprintf("%s.%d.Value", fieldName, j) + values.Set(valueName, v) + } + } case reflect.Slice: switch field.Type().Elem().Kind() { case reflect.Uint8: @@ -85,7 +109,7 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { for j := 0; j < l; j++ { prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) ifc := field.Index(j).Interface() - log.Printf("%s : %v", prefixName, ifc) + //log.Printf("%s : %v", prefixName, ifc) if ifc != nil { setQueryValues(ifc, values, prefixName) } @@ -104,7 +128,12 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { default: ifc := field.Interface() if ifc != nil { - SetQueryValues(ifc, values) 
+ if anonymous { + SetQueryValues(ifc, values) + } else { + prefixName := fieldName + "." + setQueryValues(ifc, values, prefixName) + } continue } } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go index 031b61751dcb..9c25e8f68834 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go @@ -2,6 +2,7 @@ package util import ( "fmt" + "strconv" "time" ) @@ -13,6 +14,8 @@ func GetISO8601TimeStamp(ts time.Time) string { const formatISO8601 = "2006-01-02T15:04:05Z" const jsonFormatISO8601 = `"` + formatISO8601 + `"` +const formatISO8601withoutSeconds = "2006-01-02T15:04Z" +const jsonFormatISO8601withoutSeconds = `"` + formatISO8601withoutSeconds + `"` // A ISO6801Time represents a time in ISO8601 format type ISO6801Time time.Time @@ -46,10 +49,25 @@ func (it ISO6801Time) MarshalJSON() ([]byte, error) { // UnmarshalJSON deserializes the ISO6801Time from JSON string func (it *ISO6801Time) UnmarshalJSON(data []byte) error { - if string(data) == "\"\"" { + str := string(data) + + if str == "\"\"" || len(data) == 0 { return nil } - t, err := time.ParseInLocation(jsonFormatISO8601, string(data), time.UTC) + var t time.Time + var err error + if str[0] == '"' { + t, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC) + if err != nil { + t, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC) + } + } else { + var i int64 + i, err = strconv.ParseInt(str, 10, 64) + if err == nil { + t = time.Unix(i/1000, i%1000) + } + } if err == nil { *it = ISO6801Time(t) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go index daa6bb02ec28..dd68214e3b96 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go @@ -8,18 +8,31 @@ import ( "net/http" "net/url" "sort" - "strconv" "time" ) +const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + //CreateRandomString create random string func CreateRandomString() string { + b := make([]byte, 32) + l := len(dictionary) + + _, err := srand.Read(b) - rand.Seed(time.Now().UnixNano()) - randInt := rand.Int63() - randStr := strconv.FormatInt(randInt, 36) + if err != nil { + // fail back to insecure rand + rand.Seed(time.Now().UnixNano()) + for i := range b { + b[i] = dictionary[rand.Int()%l] + } + } else { + for i, v := range b { + b[i] = dictionary[v%byte(l)] + } + } - return randStr + return string(b) } // Encode encodes the values into ``URL encoded'' form diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 000000000000..772ea5e92441 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,225 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// TODO: use singleflight for dialing and addConnCalls? +type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + p.mu.Lock() + for _, cc := range p.conns[addr] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + c.res, c.err = c.p.t.dialClientConn(addr) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. 
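The dialCall machinery coalesces concurrent dials: the first GetClientConn miss for an address starts one dial, and every caller that arrives while it is in flight blocks on the same done channel. A stripped-down, standalone sketch of that pattern with generic names (not the actual http2 types):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type call struct {
	done chan struct{} // closed when the dial finishes
	res  string        // stands in for *ClientConn
	err  error
}

type pool struct {
	mu      sync.Mutex
	dialing map[string]*call // in-flight dials, keyed by addr
}

// get returns the result of the single dial in flight for addr, starting one
// if necessary. Callers that arrive while a dial is in flight share its result.
func (p *pool) get(addr string, dial func(string) (string, error)) (string, error) {
	p.mu.Lock()
	c, ok := p.dialing[addr]
	if !ok {
		c = &call{done: make(chan struct{})}
		if p.dialing == nil {
			p.dialing = make(map[string]*call)
		}
		p.dialing[addr] = c
		go func() {
			c.res, c.err = dial(addr)
			p.mu.Lock()
			delete(p.dialing, addr) // like dialCall.dial: forget the in-flight entry
			p.mu.Unlock()
			close(c.done)
		}()
	}
	p.mu.Unlock()
	<-c.done
	return c.res, c.err
}

func main() {
	p := &pool{}
	var dials int32
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			p.get("example.com:443", func(addr string) (string, error) {
				atomic.AddInt32(&dials, 1)
				time.Sleep(50 * time.Millisecond) // keep the dial in flight so callers coalesce
				return "conn-to-" + addr, nil
			})
		}()
	}
	wg.Wait()
	fmt.Println("dials:", atomic.LoadInt32(&dials)) // usually 1: all callers shared one dial
}
```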
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go new file mode 100644 index 000000000000..daa17f5d43ba --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.6 + +package http2 + +import ( + "crypto/tls" + "fmt" + "net/http" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr(authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// convering panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. +type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. +type noDialH2RoundTripper struct{ t *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.t.RoundTrip(req) + if err == ErrNoCachedConn { + return nil, http.ErrSkipAltProtocol + } + return res, err +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 000000000000..71a4e2905683 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,122 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. 
+type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. +type StreamError struct { + StreamID uint32 + Code ErrCode +} + +func (e StreamError) Error() string { + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." +type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connErrorReason wraps a ConnectionError with an informative error about why it occurs. + +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(ErrCodeProtocol). 
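ConnectionError and StreamError partition the failure modes this file defines: the former means the whole connection must be torn down, the latter only one stream. A hedged sketch of how a caller of ReadFrame might branch on them; the handler name and package are illustrative:

```go
package h2errors

import (
	"log"

	"golang.org/x/net/http2"
)

// handleReadError classifies an error returned by (*http2.Framer).ReadFrame.
// It reports true when the connection itself is no longer usable.
func handleReadError(fr *http2.Framer, err error) (fatal bool) {
	switch e := err.(type) {
	case http2.StreamError:
		// Only one stream is poisoned; a server would answer with RST_STREAM for it.
		log.Printf("stream %d failed: %v (detail: %v)", e.StreamID, e.Code, fr.ErrorDetail())
		return false
	case http2.ConnectionError:
		// The whole connection has to go away, e.g. via a GOAWAY frame.
		log.Printf("connection error: %v", http2.ErrCode(e))
		return true
	default:
		return err != nil
	}
}
```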
+type connError struct { + Code ErrCode + Reason string +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go new file mode 100644 index 000000000000..47da0f0bf71f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" +) + +// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. +// It never allocates, but moves old data as new data is written. +type fixedBuffer struct { + buf []byte + r, w int +} + +var ( + errReadEmpty = errors.New("read from empty fixedBuffer") + errWriteFull = errors.New("write on full fixedBuffer") +) + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *fixedBuffer) Read(p []byte) (n int, err error) { + if b.r == b.w { + return 0, errReadEmpty + } + n = copy(p, b.buf[b.r:b.w]) + b.r += n + if b.r == b.w { + b.r = 0 + b.w = 0 + } + return n, nil +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *fixedBuffer) Len() int { + return b.w - b.r +} + +// Write copies bytes from p into the buffer. +// It is an error to write more data than the buffer can hold. +func (b *fixedBuffer) Write(p []byte) (n int, err error) { + // Slide existing data to beginning. + if b.r > 0 && len(p) > len(b.buf)-b.w { + copy(b.buf, b.buf[b.r:b.w]) + b.w -= b.r + b.r = 0 + } + + // Write new data. + n = copy(b.buf[b.w:], p) + b.w += n + if n < len(p) { + err = errWriteFull + } + return n, err +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 000000000000..957de25420d9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. 
+ n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. +func (f *flow) add(n int32) bool { + remain := (1<<31 - 1) - f.n + if n > remain { + return false + } + f.n += n + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 000000000000..6943f93380c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1496 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http2/hpack" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. 
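Flags are per-frame-type bitmasks, so the same bit value can mean different things on different frame types; Flags.Has is the intended way to test them. A small sketch, assuming a FrameHeader already obtained from a Framer:

```go
package h2flags

import "golang.org/x/net/http2"

// streamEnded reports whether fh carries END_STREAM, taking the frame type
// into account (the 0x1 bit means END_STREAM only on DATA and HEADERS frames).
func streamEnded(fh http2.FrameHeader) bool {
	switch fh.Type {
	case http2.FrameData:
		return fh.Flags.Has(http2.FlagDataEndStream)
	case http2.FrameHeaders:
		return fh.Flags.Has(http2.FlagHeadersEndStream)
	default:
		return false
	}
}
```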
+const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). +type frameParser func(fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. +// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. 
+func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1< 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) +} + +func (f *Framer) endWrite() error { + // Now that we know the final size, fill in the FrameHeader in + // the space previously reserved for it. Abuse append. + length := len(f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return ErrFrameTooLarge + } + _ = append(f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if logFrameWrites { + f.logWrite() + } + + n, err := f.w.Write(f.wbuf) + if err == nil && n != len(f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + log.Printf("http2: Framer %p: failed to decode just-written frame", f) + return + } + log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + +func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } +func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } +func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } +func (f *Framer) writeUint32(v uint32) { + f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + logReads: logFrameReads, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. +func (fr *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + fr.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + +// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. 
+var ErrFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from from the underlying +// reader. +func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil + if fr.lastFrame != nil { + fr.lastFrame.invalidate() + } + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return nil, err + } + if fh.Length > fr.maxReadSize { + return nil, ErrFrameTooLarge + } + payload := fr.getReadBuf(fh.Length) + if _, err := io.ReadFull(fr.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(fh, payload) + if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } + return nil, err + } + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } + return f, nil +} + +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. 
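Putting the reader side together: a hedged sketch of a loop over ReadFrame with the size limit from SetMaxReadFrameSize, assuming the matching limit was advertised to the peer in SETTINGS elsewhere; the package and function names are illustrative:

```go
package h2read

import (
	"log"
	"net"

	"golang.org/x/net/http2"
)

// readLoop reads frames until the peer misbehaves or the connection dies.
func readLoop(conn net.Conn) error {
	fr := http2.NewFramer(conn, conn) // write to conn, read from conn
	fr.SetMaxReadFrameSize(1 << 20)   // should match what we advertised via SETTINGS_MAX_FRAME_SIZE

	for {
		f, err := fr.ReadFrame()
		if err == http2.ErrFrameTooLarge {
			return err // peer exceeded the declared maximum frame size
		}
		if err != nil {
			return err
		}
		switch f := f.(type) {
		case *http2.DataFrame:
			// Data() is only valid until the next ReadFrame call, so copy it if needed.
			log.Printf("DATA on stream %d: %d bytes", f.StreamID, len(f.Data()))
		case *http2.PingFrame:
			if !f.IsAck() {
				fr.WritePing(true, f.Data) // echo the 8-byte payload back as an ACK
			}
		default:
			log.Printf("ignoring %v frame", f.Header().Type)
		}
	}
}
```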
+// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := &DataFrame{ + FrameHeader: fh, + } + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var errStreamID = errors.New("invalid streamid") + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + // TODO: ignoring padding for now. will add when somebody cares. + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + f.startWrite(FrameData, flags, streamID) + f.wbuf = append(f.wbuf, data...) + return f.endWrite() +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. +// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. 
+ return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { + f.checkValid() + buf := f.p + for len(buf) > 0 { + settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) + if settingID == s { + return binary.BigEndian.Uint32(buf[2:6]), true + } + buf = buf[6:] + } + return 0, false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. +func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + buf := f.p + for len(buf) > 0 { + if err := fn(Setting{ + SettingID(binary.BigEndian.Uint16(buf[:2])), + binary.BigEndian.Uint32(buf[2:6]), + }); err != nil { + return err + } + buf = buf[6:] + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettings writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. +// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. 
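The write side mirrors this: each Write* helper emits exactly one frame. A hedged sketch of a peer advertising its settings and widening the connection window; buf stands in for the network connection, and the Setting type with its ID/Val fields is assumed from the rest of the package since it is defined outside this hunk:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer // stands in for the TLS connection
	fr := http2.NewFramer(&buf, &buf)

	// Advertise our settings, then open up the connection-level flow window.
	fr.WriteSettings(
		http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20},
		http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20},
	)
	fr.WriteSettingsAck()                  // ACK the peer's SETTINGS (empty payload, ACK flag set)
	fr.WriteWindowUpdate(0, 1<<20)         // stream 0 means the whole connection
	fr.WriteData(1, true, []byte("hello")) // END_STREAM on stream 1

	fmt.Printf("wrote %d bytes of frames\n", buf.Len())
}
```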
+func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See http://http2.github.io/http2-spec/#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return nil, StreamError{fh.StreamID, ErrCodeProtocol} + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. +// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + f.startWrite(FrameWindowUpdate, 0, streamID) + f.writeUint32(incr) + return f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. 
+ Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + return nil, err + } + } + if len(p)-int(padLength) <= 0 { + return nil, StreamError{fh.StreamID, ErrCodeProtocol} + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. +// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
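WriteHeaders takes an already-encoded header block, so callers pair it with an hpack.Encoder. A hedged sketch of sending the headers of a minimal GET request on stream 1 (the response-reading side is omitted, and the in-memory buffer stands in for a real connection):

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	var conn bytes.Buffer // stands in for the network connection
	fr := http2.NewFramer(&conn, &conn)

	// HPACK-encode the request pseudo-headers into a header block fragment.
	var hbuf bytes.Buffer
	enc := hpack.NewEncoder(&hbuf)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
		{Name: ":path", Value: "/"},
	} {
		enc.WriteField(hf)
	}

	err := fr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbuf.Bytes(),
		EndStream:     true, // no request body
		EndHeaders:    true, // block fits in one HEADERS frame, no CONTINUATION needed
	})
	fmt.Println("WriteHeaders:", err)
}
```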
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamID(v) && !f.AllowIllegalWrites { + return errors.New("invalid dependent stream id") + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Priority.Weight) + } + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// A PriorityFrame specifies the sender-advised priority of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.3 +type PriorityFrame struct { + FrameHeader + PriorityParam +} + +// PriorityParam are the stream prioritzation parameters. +type PriorityParam struct { + // StreamDep is a 31-bit stream identifier for the + // stream that this stream depends on. Zero means no + // dependency. + StreamDep uint32 + + // Exclusive is whether the dependency is exclusive. + Exclusive bool + + // Weight is the stream's zero-indexed weight. It should be + // set together with StreamDep, or neither should be set. Per + // the spec, "Add one to the value to obtain a weight between + // 1 and 256." + Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FrameRSTStream, 0, streamID) + f.writeUint32(uint32(code)) + return f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See http://http2.github.io/http2-spec/#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + f.startWrite(FrameContinuation, flags, streamID) + f.wbuf = append(f.wbuf, headerBlockFragment...) + return f.endWrite() +} + +// A PushPromiseFrame is used to initiate a server stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.6 +type PushPromiseFrame struct { + FrameHeader + PromiseID uint32 + headerFragBuf []byte // not owned +} + +func (f *PushPromiseFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *PushPromiseFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) +} + +func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { + pp := &PushPromiseFrame{ + FrameHeader: fh, + } + if pp.StreamID == 0 { + // PUSH_PROMISE frames MUST be associated with an existing, + // peer-initiated stream. The stream identifier of a + // PUSH_PROMISE frame indicates the stream it is associated + // with. If the stream identifier field specifies the value + // 0x0, a recipient MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + // The PUSH_PROMISE frame includes optional padding. + // Padding fields and flags are identical to those defined for DATA frames + var padLength uint8 + if fh.Flags.Has(FlagPushPromisePadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + + p, pp.PromiseID, err = readUint32(p) + if err != nil { + return + } + pp.PromiseID = pp.PromiseID & (1<<31 - 1) + + if int(padLength) > len(p) { + // like the DATA frame, error out if padding is longer than the body. + return nil, ConnectionError(ErrCodeProtocol) + } + pp.headerFragBuf = p[:len(p)-int(padLength)] + return pp, nil +} + +// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. +type PushPromiseParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + + // PromiseID is the required Stream ID which this + // Push Promises + PromiseID uint32 + + // BlockFragment is part (or all) of a Header Block. 
+ BlockFragment []byte + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 +} + +// WritePushPromise writes a single PushPromise Frame. +// +// As with Header Frames, This is the low level call for writing +// individual frames. Continuation frames are handled elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePushPromise(p PushPromiseParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagPushPromisePadded + } + if p.EndHeaders { + flags |= FlagPushPromiseEndHeaders + } + f.startWrite(FramePushPromise, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { + return errStreamID + } + f.writeUint32(p.PromiseID) + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// WriteRawFrame writes a raw frame. This can be used to write +// extension frames unknown to this package. +func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + f.startWrite(t, flags, streamID) + f.writeBytes(payload) + return f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. 
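When Framer.ReadMetaHeaders is set to an hpack.Decoder, ReadFrame collapses a HEADERS frame plus its CONTINUATIONs into a single decoded MetaHeadersFrame. A hedged sketch of the consuming side; conn stands for any io.ReadWriter, and the package and function names are illustrative:

```go
package h2meta

import (
	"fmt"
	"io"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

// readRequestHeaders waits for the next HEADERS (+ CONTINUATION) block and
// returns the request method and path taken from its pseudo-headers.
func readRequestHeaders(conn io.ReadWriter) (method, path string, err error) {
	fr := http2.NewFramer(conn, conn)
	// With ReadMetaHeaders set, ReadFrame returns *MetaHeadersFrame for HEADERS.
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)

	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return "", "", err
		}
		mh, ok := f.(*http2.MetaHeadersFrame)
		if !ok {
			continue // skip non-HEADERS frames in this sketch
		}
		for _, hf := range mh.RegularFields() {
			fmt.Printf("%s: %s\n", hf.Name, hf.Value)
		}
		// PseudoValue takes the field name without the leading colon.
		return mh.PseudoValue("method"), mh.PseudoValue("path"), nil
	}
}
```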
+// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if !validHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + return nil, StreamError{mh.StreamID, ErrCodeProtocol} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf 
bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go new file mode 100644 index 000000000000..f0a562414155 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package http2 + +import "net/http" + +func requestCancel(req *http.Request) <-chan struct{} { return req.Cancel } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 000000000000..9933c9f8c748 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. 
+ +package http2 + +import ( + "bytes" + "errors" + "fmt" + "os" + "runtime" + "strconv" + "sync" +) + +var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" + +type goroutineLock uint64 + +func newGoroutineLock() goroutineLock { + if !DebugGoroutines { + return 0 + } + return goroutineLock(curGoroutineID()) +} + +func (g goroutineLock) check() { + if !DebugGoroutines { + return + } + if curGoroutineID() != uint64(g) { + panic("running on the wrong goroutine") + } +} + +func (g goroutineLock) checkNotOn() { + if !DebugGoroutines { + return + } + if curGoroutineID() == uint64(g) { + panic("running on the wrong goroutine") + } +} + +var goroutineSpace = []byte("goroutine ") + +func curGoroutineID() uint64 { + bp := littleBuf.Get().(*[]byte) + defer littleBuf.Put(bp) + b := *bp + b = b[:runtime.Stack(b, false)] + // Parse the 4707 out of "goroutine 4707 [" + b = bytes.TrimPrefix(b, goroutineSpace) + i := bytes.IndexByte(b, ' ') + if i < 0 { + panic(fmt.Sprintf("No space found in %q", b)) + } + b = b[:i] + n, err := parseUintBytes(b, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) + } + return n +} + +var littleBuf = sync.Pool{ + New: func() interface{} { + buf := make([]byte, 64) + return &buf + }, +} + +// parseUintBytes is like strconv.ParseUint, but using a []byte. +func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { + var cutoff, maxVal uint64 + + if bitSize == 0 { + bitSize = int(strconv.IntSize) + } + + s0 := s + switch { + case len(s) < 1: + err = strconv.ErrSyntax + goto Error + + case 2 <= base && base <= 36: + // valid base; nothing to do + + case base == 0: + // Look for octal, hex prefix. + switch { + case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): + base = 16 + s = s[2:] + if len(s) < 1 { + err = strconv.ErrSyntax + goto Error + } + case s[0] == '0': + base = 8 + default: + base = 10 + } + + default: + err = errors.New("invalid base " + strconv.Itoa(base)) + goto Error + } + + n = 0 + cutoff = cutoff64(base) + maxVal = 1<<uint(bitSize) - 1 + + for i := 0; i < len(s); i++ { + var v byte + d := s[i] + switch { + case '0' <= d && d <= '9': + v = d - '0' + case 'a' <= d && d <= 'z': + v = d - 'a' + 10 + case 'A' <= d && d <= 'Z': + v = d - 'A' + 10 + default: + n = 0 + err = strconv.ErrSyntax + goto Error + } + if int(v) >= base { + n = 0 + err = strconv.ErrSyntax + goto Error + } + + if n >= cutoff { + // n*base overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n *= uint64(base) + + n1 := n + uint64(v) + if n1 < n || n1 > maxVal { + // n+v overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n = n1 + } + + return n, nil + +Error: + return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} +} + +// Return the first number n such that n*base >= 1<<64. +func cutoff64(base int) uint64 { + if base < 2 { + return 0 + } + return (1<<64-1)/uint64(base) + 1 +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 000000000000..c2805f6ac4d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go @@ -0,0 +1,78 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package http2 + +import ( + "net/http" + "strings" +) + +var ( + commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case + commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case +) + +func init() { + for _, v := range []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 000000000000..f9bb03398483 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. 
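// Editor's note: an illustrative sketch, not part of the vendored file or the upstream diff.
// It shows how a caller might use the Encoder and WriteField described above; the canonical
// import path golang.org/x/net/http2/hpack is assumed.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// Each WriteField appends one HPACK-encoded field to buf in a single Write.
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/index.html"})
	// Sensitive fields are emitted with the "Never Indexed" representation.
	enc.WriteField(hpack.HeaderField{Name: "cookie", Value: "secret", Sensitive: true})
	fmt.Printf("encoded header block: % x\n", buf.Bytes())
}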
+func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both stable and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + for idx, hf := range staticTable { + if !constantTimeStringCompare(hf.Name, f.Name) { + continue + } + if i == 0 { + i = uint64(idx + 1) + } + if f.Sensitive { + continue + } + if !constantTimeStringCompare(hf.Value, f.Value) { + continue + } + i = uint64(idx + 1) + nameValueMatch = true + return + } + + j, nameValueMatch := e.dynTab.search(f) + if nameValueMatch || (i == 0 && j != 0) { + i = j + uint64(len(staticTable)) + } + return +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same size of the default dynamic header table +// size described in HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v. +func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Inremental Indexing" +// representation is used. 
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the the extended buffer. +// +// s will be encoded in Huffman codes only when it produces strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. +func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 000000000000..dcf257afa413 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,542 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. 
+// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid psuedo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7540 section 5.2. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. + saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. 
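// Editor's note: an illustrative sketch, not part of the vendored file. It round-trips a small
// header block through the Decoder API defined in this file (NewDecoder above, DecodeFull further
// below); the upstream import path is assumed.
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"})

	// DecodeFull decodes an entire block at once; NewDecoder's emit callback
	// plus Write is the incremental equivalent used by the Framer.
	dec := hpack.NewDecoder(4096, func(hpack.HeaderField) {})
	fields, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range fields {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	}
}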
+var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // ents is the FIFO described at + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + // The newest (low index) is append at the end, and items are + // evicted from the front. + ents []HeaderField + size uint32 + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +// TODO: change dynamicTable to be a struct with a slice and a size int field, +// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: +// +// +// Then make add increment the size. maybe the max size should move from Decoder to +// dynamicTable and add should return an ok bool if there was enough space. +// +// Later we'll need a remove operation on dynamicTable. + +func (dt *dynamicTable) add(f HeaderField) { + dt.ents = append(dt.ents, f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff (front of the slice) +func (dt *dynamicTable) evict() { + base := dt.ents // keep base pointer of slice + for dt.size > dt.maxSize { + dt.size -= dt.ents[0].Size() + dt.ents = dt.ents[1:] + } + + // Shift slice contents down if we evicted things. + if len(dt.ents) != len(base) { + copy(base, dt.ents) + dt.ents = base[:len(dt.ents)] + } +} + +// constantTimeStringCompare compares string a and b in a constant +// time manner. +func constantTimeStringCompare(a, b string) bool { + if len(a) != len(b) { + return false + } + + c := byte(0) + + for i := 0; i < len(a); i++ { + c |= a[i] ^ b[i] + } + + return c == 0 +} + +// Search searches f in the table. The return value i is 0 if there is +// no name match. If there is name match or name/value match, i is the +// index of that entry (1-based). 
If both name and value match, +// nameValueMatch becomes true. +func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + l := len(dt.ents) + for j := l - 1; j >= 0; j-- { + ent := dt.ents[j] + if !constantTimeStringCompare(ent.Name, f.Name) { + continue + } + if i == 0 { + i = uint64(l - j) + } + if f.Sensitive { + continue + } + if !constantTimeStringCompare(ent.Value, f.Value) { + continue + } + i = uint64(l - j) + nameValueMatch = true + return + } + return +} + +func (d *Decoder) maxTableIndex() int { + return len(d.dynTab.ents) + len(staticTable) +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + if i < 1 { + return + } + if i > uint64(d.maxTableIndex()) { + return + } + if i <= uint64(len(staticTable)) { + return staticTable[i-1], true + } + dents := d.dynTab.ents + return dents[len(dents)-(int(i)-len(staticTable))], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? 
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + return d.parseDynamicTableSizeUpdate() + } + + return DecodingError{errors.New("invalid encoding")} +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldIndexed() error { + buf := d.buf + idx, buf, err := readVarInt(7, buf) + if err != nil { + return err + } + hf, ok := d.at(idx) + if !ok { + return DecodingError{InvalidIndexError(idx)} + } + d.buf = buf + return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { + buf := d.buf + nameIdx, buf, err := readVarInt(n, buf) + if err != nil { + return err + } + + var hf HeaderField + wantStr := d.emitEnabled || it.indexed() + if nameIdx > 0 { + ihf, ok := d.at(nameIdx) + if !ok { + return DecodingError{InvalidIndexError(nameIdx)} + } + hf.Name = ihf.Name + } else { + hf.Name, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + } + hf.Value, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + d.buf = buf + if it.indexed() { + d.dynTab.add(hf) + } + hf.Sensitive = it.sensitive() + return d.callEmit(hf) +} + +func (d *Decoder) callEmit(hf HeaderField) error { + if d.maxStrLen != 0 { + if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { + return ErrStringLength + } + } + if d.emitEnabled { + d.emit(hf) + } + return nil +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseDynamicTableSizeUpdate() error { + buf := d.buf + size, buf, err := readVarInt(5, buf) + if err != nil { + return err + } + if size > uint64(d.dynTab.allowedMaxSize) { + return DecodingError{errors.New("dynamic table size update too large")} + } + d.dynTab.setMaxSize(uint32(size)) + d.buf = buf + return nil +} + +var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} + +// readVarInt reads an unsigned variable length integer off the +// beginning of p. n is the parameter as described in +// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// +// n must always be between 1 and 8. +// +// The returned remain buffer is either a smaller suffix of p, or err != nil. +// The error is errNeedMore if p doesn't contain a complete integer. 
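// Editor's note: a self-contained worked example of the prefixed-integer encoding that
// readVarInt (below) parses. appendPrefixedInt is a hypothetical re-implementation of the
// unexported appendVarInt, written only for illustration.
package main

import "fmt"

// appendPrefixedInt encodes i with an n-bit prefix as described in RFC 7541 section 5.1.
func appendPrefixedInt(dst []byte, n uint, i uint64) []byte {
	k := uint64(1)<<n - 1
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k)) // prefix saturated: all n bits set
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f))) // 7 payload bits per byte, MSB = continuation
	}
	return append(dst, byte(i))
}

func main() {
	// RFC 7541 appendix C.1.2: 1337 with a 5-bit prefix encodes as 1f 9a 0a.
	fmt.Printf("% x\n", appendPrefixedInt(nil, 5, 1337))
}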
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { + if n < 1 || n > 8 { + panic("bad n") + } + if len(p) == 0 { + return 0, p, errNeedMore + } + i = uint64(p[0]) + if n < 8 { + i &= (1 << uint64(n)) - 1 + } + if i < (1<<uint64(n))-1 { + return i, p[1:], nil + } + + origP := p + p = p[1:] + var m uint64 + for len(p) > 0 { + b := p[0] + p = p[1:] + i += uint64(b&127) << m + if b&128 == 0 { + return i, p, nil + } + m += 7 + if m >= 63 { // TODO: proper overflow check. making this up. + return 0, origP, errVarintOverflow + } + } + return 0, origP, errNeedMore +} + +// readString decodes an hpack string from p. +// +// wantStr is whether s will be used. If false, decompression and +// []byte->string garbage are skipped if s will be ignored +// anyway. This does mean that huffman decoding errors for non-indexed +// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server +// is returning an error anyway, and because they're not indexed, the error +// won't affect the decoding state. +func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { + if len(p) == 0 { + return "", p, errNeedMore + } + isHuff := p[0]&128 != 0 + strLen, p, err := readVarInt(7, p) + if err != nil { + return "", p, err + } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } + if uint64(len(p)) < strLen { + return "", p, errNeedMore + } + if !isHuff { + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil + } + + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC + } + return s, p[strLen:], nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 000000000000..eb4b1f05cd04 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,190 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil + } + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength.
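// Editor's note: an illustrative sketch, not part of the vendored file, exercising the exported
// Huffman helpers defined in this file (AppendHuffmanString, HuffmanEncodeLength,
// HuffmanDecodeToString); the upstream import path is assumed.
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"
	// Per RFC 7541 appendix C.4.1 this string Huffman-encodes to 12 bytes.
	fmt.Println("predicted length:", hpack.HuffmanEncodeLength(s))
	enc := hpack.AppendHuffmanString(nil, s)
	fmt.Printf("encoded: % x\n", enc)

	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded:", dec) // www.example.com
}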
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + n := rootHuffmanNode + cur, nbits := uint(0), uint8(0) + for _, b := range v { + cur = cur<<8 | uint(b) + nbits += 8 + for nbits >= 8 { + idx := byte(cur >> (nbits - 8)) + n = n.children[idx] + if n == nil { + return ErrInvalidHuffman + } + if n.children == nil { + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } + buf.WriteByte(n.sym) + nbits -= n.codeLen + n = rootHuffmanNode + } else { + nbits -= 8 + } + } + } + for nbits > 0 { + n = n.children[byte(cur<<(8-nbits))] + if n.children != nil || n.codeLen > nbits { + break + } + buf.WriteByte(n.sym) + nbits -= n.codeLen + n = rootHuffmanNode + } + return nil +} + +type node struct { + // children is non-nil for internal nodes + children []*node + + // The following are only valid if children is nil: + codeLen uint8 // number of bits that led to the output of sym + sym byte // output symbol +} + +func newInternalNode() *node { + return &node{children: make([]*node, 256)} +} + +var rootHuffmanNode = newInternalNode() + +func init() { + if len(huffmanCodes) != 256 { + panic("unexpected size") + } + for i, code := range huffmanCodes { + addDecoderNode(byte(i), code, huffmanCodeLen[i]) + } +} + +func addDecoderNode(sym byte, code uint32, codeLen uint8) { + cur := rootHuffmanNode + for codeLen > 8 { + codeLen -= 8 + i := uint8(code >> codeLen) + if cur.children[i] == nil { + cur.children[i] = newInternalNode() + } + cur = cur.children[i] + } + shift := 8 - codeLen + start, end := int(uint8(code<<shift)), 1<<shift + for i := start; i < start+end; i++ { + cur.children[i] = &node{sym: sym, codeLen: codeLen} + } +} + +// AppendHuffmanString appends s, as encoded in Huffman codes, to dst +// and returns the extended buffer. +func AppendHuffmanString(dst []byte, s string) []byte { + rembits := uint8(8) + + for i := 0; i < len(s); i++ { + if rembits == 8 { + dst = append(dst, 0) + } + dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) + } + + if rembits < 8 { + // special EOS symbol + code := uint32(0x3fffffff) + nbits := uint8(30) + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t + } + + return dst +} + +// HuffmanEncodeLength returns the number of bytes required to encode +// s in Huffman codes. The result is round up to byte boundary. +func HuffmanEncodeLength(s string) uint64 { + n := uint64(0) + for i := 0; i < len(s); i++ { + n += uint64(huffmanCodeLen[s[i]]) + } + return (n + 7) / 8 +} + +// appendByteToHuffmanCode appends Huffman code for c to dst and +// returns the extended buffer and the remaining bits in the last +// element. The appending is not byte aligned and the remaining bits +// in the last element of dst is given in rembits. +func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { + code := huffmanCodes[c] + nbits := huffmanCodeLen[c] + + for { + if rembits > nbits { + t := uint8(code << (rembits - nbits)) + dst[len(dst)-1] |= t + rembits -= nbits + break + } + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t + + nbits -= rembits + rembits = 8 + + if nbits == 0 { + break + } + + dst = append(dst, 0) + } + + return dst, rembits +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go new file mode 100644 index 000000000000..b9283a023302 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go @@ -0,0 +1,352 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package hpack + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = [...]HeaderField{ + pair(":authority", ""), // index 1 (1-based) + pair(":method", "GET"), + pair(":method", "POST"), + pair(":path", "/"), + pair(":path", "/index.html"), + pair(":scheme", "http"), + pair(":scheme", "https"), + pair(":status", "200"), + pair(":status", "204"), + pair(":status", "206"), + pair(":status", "304"), + pair(":status", "400"), + pair(":status", "404"), + pair(":status", "500"), + pair("accept-charset", ""), + pair("accept-encoding", "gzip, deflate"), + pair("accept-language", ""), + pair("accept-ranges", ""), + pair("accept", ""), + pair("access-control-allow-origin", ""), + pair("age", ""), + pair("allow", ""), + pair("authorization", ""), + pair("cache-control", ""), + pair("content-disposition", ""), + pair("content-encoding", ""), + pair("content-language", ""), + pair("content-length", ""), + pair("content-location", ""), + pair("content-range", ""), + pair("content-type", ""), + pair("cookie", ""), + pair("date", ""), + pair("etag", ""), + pair("expect", ""), + pair("expires", ""), + pair("from", ""), + pair("host", ""), + pair("if-match", ""), + pair("if-modified-since", ""), + pair("if-none-match", ""), + pair("if-range", ""), + pair("if-unmodified-since", ""), + pair("last-modified", ""), + pair("link", ""), + pair("location", ""), + pair("max-forwards", ""), + pair("proxy-authenticate", ""), + pair("proxy-authorization", ""), + pair("range", ""), + pair("referer", ""), + pair("refresh", ""), + pair("retry-after", ""), + pair("server", ""), + pair("set-cookie", ""), + pair("strict-transport-security", ""), + pair("transfer-encoding", ""), + pair("user-agent", ""), + pair("vary", ""), + pair("via", ""), + pair("www-authenticate", ""), +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 
0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 000000000000..0529b63e2a17 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,463 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. 
+package http2 + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateResvLocal + stateResvRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateResvLocal: "ResvLocal", + stateResvRemote: "ResvRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. +func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validHeaderFieldName reports whether v is a valid header field name (key). 
+// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / " +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') { + return false + } + if !isTokenTable[byte(r)] { + return false + } + } + return true +} + +// validHeaderFieldValue reports whether v is a valid header field value. +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func validHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + if b := v[i]; b < ' ' && b != '\t' || b == 0x7f { + return false + } + } + return true +} + +var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) + +func init() { + for i := 100; i <= 999; i++ { + if v := http.StatusText(i); v != "" { + httpCodeStringCommon[i] = strconv.Itoa(i) + } + } +} + +func httpCodeString(code int) string { + if s, ok := httpCodeStringCommon[code]; ok { + return s + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. 
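// Editor's note: a standalone sketch (all names invented) of the lazy, pooled bufio.Writer
// pattern that bufferedWriter below implements, so many idle connections don't each pin a
// write buffer between flushes.
package main

import (
	"bufio"
	"io"
	"os"
	"sync"
)

var writerPool = sync.Pool{
	New: func() interface{} { return bufio.NewWriterSize(nil, 4<<10) },
}

type lazyWriter struct {
	w  io.Writer     // the real destination, immutable
	bw *bufio.Writer // non-nil only while data is buffered
}

func (l *lazyWriter) Write(p []byte) (int, error) {
	if l.bw == nil {
		l.bw = writerPool.Get().(*bufio.Writer)
		l.bw.Reset(l.w)
	}
	return l.bw.Write(p)
}

func (l *lazyWriter) Flush() error {
	if l.bw == nil {
		return nil
	}
	err := l.bw.Flush()
	l.bw.Reset(nil) // drop the reference to l.w before pooling
	writerPool.Put(l.bw)
	l.bw = nil
	return err
}

func main() {
	lw := &lazyWriter{w: os.Stdout}
	lw.Write([]byte("hello via pooled buffer\n"))
	lw.Flush()
}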
+type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + // TODO: pick something better? this is a bit under + // (3 x typical 1500 byte MTU) at least. + return bufio.NewWriterSize(nil, 4<<10) + }, +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC2616, section 4.4. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owners, so + // stash it away while we sort the user's buffer. 
+ save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go new file mode 100644 index 000000000000..d0fa5c890691 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package http2 + +import "net/http" + +func requestCancel(req *http.Request) <-chan struct{} { return nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 000000000000..db53c5b8cbfc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import "net/http" + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 000000000000..69446e7a3701 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. 
+func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 000000000000..1e6980c319de --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2178 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: replace all <-sc.doneServing with reads from the stream's cw +// instead, and make sure that on close we close all open +// streams. then remove doneServing? + +// TODO: re-audit GOAWAY support. Consider each incoming frame type and +// whether it should be ignored during graceful shutdown. + +// TODO: disconnect idle clients. GFE seems to do 4 minutes. make +// configurable? or maximum number of idle clients and remove the +// oldest? + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. 
the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. 
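// A usage sketch (not part of the vendored patch) of the ConfigureServer API
// documented above: it registers the HTTP/2 NextProtos and checks the TLS
// cipher suite configuration before the server starts serving. The address,
// stream limit, and certificate paths are placeholders.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	if err := http2.ConfigureServer(srv, &http2.Server{MaxConcurrentStreams: 250}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert/key
}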
+func ConfigureServer(s *http.Server, conf *Server) error { + if conf == nil { + conf = new(Server) + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256. + const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + if cs == requiredCipher { + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers + // to switch to "h2". + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14") + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. 
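// A usage sketch (not part of the vendored patch) of Server.ServeConn on a
// raw accepted connection, i.e. cleartext "prior knowledge" HTTP/2, which is
// the kind of suitably-behaving net.Conn the ServeConn documentation
// mentions. The address and handler are illustrative; a real client must
// begin by sending the HTTP/2 preface on this connection.
package main

import (
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	ln, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatal(err)
	}
	h2 := &http2.Server{}
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello over cleartext HTTP/2\n"))
	})
	for {
		c, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go h2.ServeConn(c, &http2.ServeConnOpts{Handler: hello})
	}
}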
If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan frameWriteMsg, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + advMaxStreams: s.maxConcurrentStreams(), + writeSched: writeScheduler{ + maxFrameSize: initialMaxFrameSize, + }, + initialWindowSize: initialWindowSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. 
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +func isBadCipher(cipher uint16) bool { + switch cipher { + case tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + // Reject cipher suites from Appendix A. + // "This list includes those cipher suites that do not + // offer an ephemeral key exchange and those that are + // based on the TLS null, stream or block cipher type" + return true + default: + return false + } +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan frameWriteMsg // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + testHookCh chan func(int) // code to run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curOpenStreams uint32 // client's number of open streams + maxStreamID uint32 // max ever seen + streams map[uint32]*stream + initialWindowSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + writeSched writeScheduler + inGoAway bool // we've started to or sent GOAWAY + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimerCh <-chan time.Time // nil until used + shutdownTimer *time.Timer // nil until used + freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. +type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + sentReset bool // only true once detached from streams map + gotReset bool // only true once detacted from streams map + gotTrailerHeader bool // HEADER frame for trailers was seen + reqBuf []byte + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://http2.github.io/http2-spec/#rfc.section.5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. 
For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID <= sc.maxStreamID { + return stateClosed, nil + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. 
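// A self-contained sketch (simplified; not the vendored gate type) of the
// one-frame-at-a-time handoff the readFrames loop documented above uses: the
// reader goroutine sends an item plus a readMore callback, then blocks until
// the consumer signals it is finished with that item before reading the next.
package main

import "fmt"

type item struct {
	v        int
	readMore func() // consumer calls this once it no longer needs v
}

func main() {
	results := make(chan item)
	gate := make(chan struct{})
	go func() {
		for i := 0; i < 3; i++ {
			results <- item{v: i, readMore: func() { gate <- struct{}{} }}
			<-gate // wait until the previous item has been released
		}
		close(results)
	}()
	for it := range results {
		fmt.Println("frame", it.v)
		it.readMore()
	}
}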
+func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wm frameWriteMsg // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { + err := wm.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wm, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. + if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(frameWriteMsg{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + + // TODO: more actual settings, notably + // SettingInitialWindowSize, but then we also + // want to bump up the conn window size the + // same amount here right after the settings + }, + }) + sc.unackedSettings++ + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. 
+ sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.NewTimer(firstSettingsTimeout) + loopNum := 0 + for { + loopNum++ + select { + case wm := <-sc.wantWriteFrameCh: + sc.writeFrame(wm) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer.C != nil { + settingsTimer.Stop() + settingsTimer.C = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case <-settingsTimer.C: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case <-sc.shutdownTimerCh: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case fn := <-sc.testHookCh: + fn(loopNum) + } + } +} + +// readPreface reads the ClientPreface greeting from the peer +// or returns an error on timeout or an invalid greeting. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errors.New("timeout waiting for client preface") + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(frameWriteMsg{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. +// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. 
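// A minimal sketch (illustrative names, not the vendored types) of the
// send-or-abort pattern described above for writeFrameFromHandler: a handler
// goroutine offers work to the serve loop, but returns an error instead of
// blocking forever if the loop has already exited (signalled by closing
// doneServing).
package main

import (
	"errors"
	"fmt"
)

var errServeLoopGone = errors.New("serve loop exited")

func sendWork(work chan<- string, doneServing <-chan struct{}, msg string) error {
	select {
	case work <- msg:
		return nil
	case <-doneServing:
		return errServeLoopGone
	}
}

func main() {
	work := make(chan string, 1)
	doneServing := make(chan struct{})
	fmt.Println(sendWork(work, doneServing, "frame 1")) // <nil>
	close(doneServing)
	// The buffer is full and the loop is gone, so this returns the error.
	fmt.Println(sendWork(work, doneServing, "frame 2")) // serve loop exited
}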
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wm: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wm frameWriteMsg) { + sc.serveG.check() + sc.writeSched.add(wm) + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wm (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wm. +func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wm.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + panic("internal error: attempt to send frame on half-closed-local stream") + case stateClosed: + if st.sentReset || st.gotReset { + // Skip this frame. + sc.scheduleFrameWrite() + return + } + panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + go sc.writeFrameAsync(wm) +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// the main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + + wm := res.wm + st := wm.stream + + closeStream := endsStream(wm.write) + + if _, ok := wm.write.(handlerPanicRST); ok { + sc.closeStream(st, errHandlerPanicked) + } + + // Reply (if requested) to the blocked ServeHTTP goroutine. + if ch := wm.done; ch != nil { + select { + case ch <- res.err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) + } + } + wm.write = nil // prevent use (assume it's tainted after wm.done send) + + if closeStream { + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for finishing writing to a ResponseWriter + // while still reading data (see possible TODO + // at top of this file), we go into closed + // state here anyway, after telling the peer + // we're hanging up on them. + st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream + errCancel := StreamError{st.id, ErrCodeCancel} + sc.resetStream(errCancel) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. 
+// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. +func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame { + return + } + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(frameWriteMsg{ + write: &writeGoAway{ + maxStreamID: sc.maxStreamID, + code: sc.goAwayCode, + }, + }) + return + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}}) + return + } + if !sc.inGoAway { + if wm, ok := sc.writeSched.take(); ok { + sc.startFrameWrite(wm) + return + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + return + } +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + if code != ErrCodeNo { + sc.shutDownIn(250 * time.Millisecond) + } else { + // TODO: configurable + sc.shutDownIn(1 * time.Second) + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.NewTimer(d) + sc.shutdownTimerCh = sc.shutdownTimer.C +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(frameWriteMsg{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.sentReset = true + sc.closeStream(st, se) + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. 
+ return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + sc.writeFrame(frameWriteMsg{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + st := sc.streams[f.StreamID] + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." + return nil + } + if !st.flow.add(int32(f.Increment)) { + return StreamError{f.StreamID, ErrCodeFlowControl} + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. 
+ return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.gotReset = true + sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + sc.curOpenStreams-- + if sc.curOpenStreams == 0 { + sc.setConnState(http.StateIdle) + } + delete(sc.streams, st.id) + if p := st.body; p != nil { + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.forgetStream(st.id) + if st.reqBuf != nil { + // Stash this request body buffer (64k) away for reuse + // by a future POST/PUT/etc. + // + // TODO(bradfitz): share on the server? sync.Pool? + // Server requires locks and might hurt contention. + // sync.Pool might work, or might be worse, depending + // on goroutine CPU migrations. (get and put on + // separate CPUs). Maybe a mix of strategies. But + // this is an easy win for now. + sc.freeRequestBodyBuf = st.reqBuf + } +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.writeSched.maxFrameSize = s.Val + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialWindowSize + sc.initialWindowSize = int32(val) + growth := sc.initialWindowSize - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." 
+ return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + st, ok := sc.streams[id] + if !ok || st.state != stateOpen || st.gotTrailerHeader { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + return StreamError{id, ErrCodeStreamClosed} + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + data := f.Data() + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return StreamError{id, ErrCodeStreamClosed} + } + if len(data) > 0 { + // Check whether the client has flow control quota. + if int(st.inflow.available()) < len(data) { + return StreamError{id, ErrCodeFlowControl} + } + st.inflow.take(int32(len(data))) + wrote, err := st.body.Write(data) + if err != nil { + return StreamError{id, ErrCodeStreamClosed} + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.Header().StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://http2.github.io/http2-spec/#rfc.section.5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + st := sc.streams[f.Header().StreamID] + if st != nil { + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] 
An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxStreamID = id + + st = &stream{ + sc: sc, + id: id, + state: stateOpen, + } + if f.StreamEnded() { + st.state = stateHalfClosedRemote + } + st.cw.Init() + + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings + + sc.streams[id] = st + if f.HasPriority() { + adjustStreamPriority(sc.streams, st.id, f.Priority) + } + sc.curOpenStreams++ + if sc.curOpenStreams == 1 { + sc.setConnState(http.StateActive) + } + if sc.curOpenStreams > sc.advMaxStreams { + // "Endpoints MUST NOT exceed the limit set by their + // peer. An endpoint that receives a HEADERS frame + // that causes their advertised concurrent stream + // limit to be exceeded MUST treat this as a stream + // error (Section 5.4.2) of type PROTOCOL_ERROR or + // REFUSED_STREAM." + if sc.unackedSettings == 0 { + // They should know better. + return StreamError{st.id, ErrCodeProtocol} + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return StreamError{st.id, ErrCodeRefusedStream} + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return StreamError{st.id, ErrCodeProtocol} + } + + if len(f.PseudoFields()) > 0 { + return StreamError{st.id, ErrCodeProtocol} + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) + return nil +} + +func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) { + st, ok := streams[streamID] + if !ok { + // TODO: not quite correct (this streamID might + // already exist in the dep tree, but be closed), but + // close enough for now. + return + } + st.weight = priority.Weight + parent := streams[priority.StreamDep] // might be nil + if parent == st { + // if client tries to set this stream to be the parent of itself + // ignore and keep going + return + } + + // section 5.3.3: If a stream is made dependent on one of its + // own dependencies, the formerly dependent stream is first + // moved to be dependent on the reprioritized stream's previous + // parent. The moved dependency retains its weight. 
+ for piter := parent; piter != nil; piter = piter.parent { + if piter == st { + parent.parent = st.parent + break + } + } + st.parent = parent + if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) { + for _, openStream := range streams { + if openStream != st && openStream.parent == st.parent { + openStream.parent = st + } + } + } +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + method := f.PseudoValue("method") + path := f.PseudoValue("path") + scheme := f.PseudoValue("scheme") + authority := f.PseudoValue("authority") + + isConnect := method == "CONNECT" + if isConnect { + if path != "" || scheme != "" || authority == "" { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + } else if method == "" || path == "" || + (scheme != "https" && scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." + // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + + bodyOpen := !f.StreamEnded() + if method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + var tlsState *tls.ConnectionState // nil if not scheme https + + if scheme == "https" { + tlsState = sc.tlsState + } + + header := make(http.Header) + for _, hf := range f.RegularFields() { + header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + + if authority == "" { + authority = header.Get("Host") + } + needsContinue := header.Get("Expect") == "100-continue" + if needsContinue { + header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := header["Cookie"]; len(cookies) > 1 { + header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. 
+ default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(header, "Trailer") + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + var url_ *url.URL + var requestURI string + if isConnect { + url_ = &url.URL{Host: authority} + requestURI = authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(path) + if err != nil { + return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + } + requestURI = path + } + req := &http.Request{ + Method: method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: authority, + Body: body, + Trailer: trailer, + } + if bodyOpen { + st.reqBuf = sc.getRequestBodyBuf() + body.pipe = &pipe{ + b: &fixedBuffer{buf: st.reqBuf}, + } + + if vv, ok := header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + } + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +func (sc *serverConn) getRequestBodyBuf() []byte { + sc.serveG.check() + if buf := sc.freeRequestBodyBuf; buf != nil { + sc.freeRequestBodyBuf = nil + return buf + } + return make([]byte, initialWindowSize) +} + +// Run on its own goroutine. +func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + if didPanic { + e := recover() + // Same as net/http: + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.writeFrameFromHandler(frameWriteMsg{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(frameWriteMsg{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(frameWriteMsg{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { + sc.serveG.checkNotOn() // NOT on + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. 
+ const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(frameWriteMsg{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +type requestBody struct { + stream *stream + conn *serverConn + closed bool + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil { + b.pipe.CloseWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if n > 0 { + b.conn.noteBodyReadFromHandler(b.stream, n) + } + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. 
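// A minimal sketch of the reuse pattern that responseWriter and responseWriterState
// above depend on: get a pooled state value, zero its fields while keeping the
// allocated buffer, and return it to the pool when the request is done. The type and
// function names here are illustrative placeholders, not part of this package's API.
type scratchState struct {
	buf    []byte
	status int
}

var scratchPool = sync.Pool{
	New: func() interface{} { return &scratchState{buf: make([]byte, 0, 4<<10)} },
}

func acquireScratch() *scratchState {
	st := scratchPool.Get().(*scratchState)
	keep := st.buf[:0]   // keep the existing allocation
	*st = scratchState{} // zero all the fields, as *rws = responseWriterState{} does above
	st.buf = keep
	return st
}

func releaseScratch(st *scratchState) {
	scratchPool.Put(st)
}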
+func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Content-Length", "Trailer": + // Forbidden by RFC 2616 14.40. + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. 
Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclarnig them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + +func (w *responseWriter) Flush() { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.bw.Buffered() > 0 { + if err := rws.bw.Flush(); err != nil { + // Ignore the error. The frame writer already knows. + return + } + } else { + // The bufio.Writer won't call chunkWriter.Write + // (writeChunk with zero bytes, so we have to do it + // ourselves to force the HTTP response header and/or + // final DATA frame (with END_STREAM) to be sent. 
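// A minimal handler sketch of the "Trailer:" prefix mechanism described above: the
// trailer value is only known after the body has been written, so the handler sets it
// through the Header map with the TrailerPrefix prefix and promoteUndeclaredTrailers
// turns it into a real trailer. The Foo-Checksum name and the body are placeholders.
func setLateTrailer(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain")
	io.WriteString(w, "body goes out first\n")
	// Stripped to "Foo-Checksum" and sent as a trailer after the handler returns.
	w.Header().Set(TrailerPrefix+"Foo-Checksum", "abc123")
}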
+ rws.writeChunk(nil) + } +} + +func (w *responseWriter) CloseNotify() <-chan bool { + rws := w.rws + if rws == nil { + panic("CloseNotify called after Handler finished") + } + rws.closeNotifierMu.Lock() + ch := rws.closeNotifierCh + if ch == nil { + ch = make(chan bool, 1) + rws.closeNotifierCh = ch + go func() { + rws.stream.cw.Wait() // wait for close + ch <- true + }() + } + rws.closeNotifierMu.Unlock() + return ch +} + +func (w *responseWriter) Header() http.Header { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.handlerHeader == nil { + rws.handlerHeader = make(http.Header) + } + return rws.handlerHeader +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler migth call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. +func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + rws.handlerDone = true + w.Flush() + w.rws = nil + responseWriterStatePool.Put(rws) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 2616 section 2.1 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 000000000000..7d558a4bdb45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,1666 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allow. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. 
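// A minimal usage sketch, assuming a caller outside this file: the Transport above is
// an http.RoundTripper, so it plugs straight into a standard *http.Client. The URL is
// a placeholder.
func exampleClient() error {
	c := &http.Client{Transport: &Transport{}}
	res, err := c.Get("https://example.com/")
	if err != nil {
		return err
	}
	defer res.Body.Close()
	_, err = io.Copy(ioutil.Discard, res.Body)
	return err
}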
+// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + bw *bufio.Writer + br *bufio.Reader + fr *Framer + // Settings from peer: + maxFrameSize uint32 + maxConcurrentStreams uint32 + initialWindowSize uint32 + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. +type clientStream struct { + cc *ClientConn + req *http.Request + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + requestedGzip bool + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel runs in its own goroutine and waits for the user +// to either cancel a RoundTrip request (using the provided +// Request.Cancel channel), or for the request to be done (any way it +// might be removed from the cc.streams map: peer reset, successful +// completion, TCP connection breakage, etc) +func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) { + if cancel == nil { + return + } + select { + case <-cancel: + cs.bufPipe.CloseWithError(errRequestCanceled) + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + case <-cs.done: + } +} + +// checkReset reports any error sent in a RST_STREAM frame by the +// server. 
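// A minimal sketch of upgrading an existing HTTP/1 transport with ConfigureTransport
// above (Go 1.6 or later). The TLS config value is a placeholder.
func exampleConfigure() (*http.Client, error) {
	t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
	if err := ConfigureTransport(t1); err != nil {
		return nil, err
	}
	// t1 now speaks HTTP/2 to servers that negotiate "h2", and HTTP/1.1 otherwise.
	return &http.Client{Transport: t1}, nil
}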
+func (cs *clientStream) checkReset() error { + select { + case <-cs.peerReset: + return cs.resetErr + default: + return nil + } +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +var ErrNoCachedConn = errors.New("http2: no cached connection was available") + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(authority string) (addr string) { + if _, _, err := net.SplitHostPort(authority); err == nil { + return authority + } + return net.JoinHostPort(authority, "443") +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if req.URL.Scheme != "https" { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Host) + for { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + res, err := cc.RoundTrip(req) + if shouldRetryRequest(req, err) { + continue + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(*clientConnPool); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") +) + +func shouldRetryRequest(req *http.Request, err error) bool { + // TODO: retry GET requests (no bodies) more aggressively, if shutdown + // before response. + return err == errClientConnUnusable +} + +func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.NewClientConn(tconn) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *t.TLSClientConfig + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 
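// A minimal sketch of how stickyErrWriter above is meant to be used: many buffered
// writes, a single error check at the end, which is how NewClientConn wires up cc.bw
// below. The conn parameter and the strings written are placeholders.
func exampleStickyWriter(conn net.Conn) error {
	var werr error
	bw := bufio.NewWriter(stickyErrWriter{w: conn, err: &werr})
	bw.WriteString("first write\n")
	bw.WriteString("second write\n") // skipped by bufio if an earlier write already failed
	bw.Flush()
	return werr // the first underlying write error, if any
}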
+ } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + if VerboseLogs { + t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) + } + if _, err := c.Write(clientPreface); err != nil { + t.vlogf("client preface write error: %v", err) + return nil, err + } + + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + streams: make(map[uint32]*clientStream), + } + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + // Read the obligatory SETTINGS frame + f, err := cc.fr.ReadFrame() + if err != nil { + return nil, err + } + sf, ok := f.(*SettingsFrame) + if !ok { + return nil, fmt.Errorf("expected settings frame, got: %T", f) + } + cc.fr.WriteSettingsAck() + cc.bw.Flush() + + sf.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? 
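// A minimal sketch of what a custom Transport.DialTLS callback would need to do
// itself, mirroring the checks dialTLSDefault above performs: handshake, then verify
// that ALPN negotiated "h2". The addr and host parameters are placeholders.
func exampleDialH2(addr, host string) (net.Conn, error) {
	cfg := &tls.Config{ServerName: host, NextProtos: []string{NextProtoTLS}}
	cn, err := tls.Dial("tcp", addr, cfg)
	if err != nil {
		return nil, err
	}
	if err := cn.Handshake(); err != nil {
		cn.Close()
		return nil, err
	}
	if p := cn.ConnectionState().NegotiatedProtocol; p != NextProtoTLS {
		cn.Close()
		return nil, fmt.Errorf("unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
	}
	return cn, nil
}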
+ t.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.goAway = f +} + +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + return cc.goAway == nil && !cc.closed && + int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && + cc.nextStreamID < 2147483647 +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + // TODO: do clients send GOAWAY too? maybe? Just Close: + cc.mu.Unlock() + + cc.tconn.Close() +} + +const maxAllocFrameSize = 512 << 10 + +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. +func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + // TODO: could do better allocation-wise here, but trailers are rare, + // so being lazy for now. + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. 
+func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return errors.New("http2: invalid Upgrade request header") + } + if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { + return errors.New("http2: invalid Transfer-Encoding request header") + } + if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { + return errors.New("http2: invalid Connection request header") + } + return nil +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, err + } + hasTrailers := trailers != "" + + var body io.Reader = req.Body + contentLen := req.ContentLength + if req.Body != nil && contentLen == 0 { + // Test to see if it's actually zero or just unset. + var buf [1]byte + n, rerr := io.ReadFull(body, buf[:]) + if rerr != nil && rerr != io.EOF { + contentLen = -1 + body = errorReader{rerr} + } else if n == 1 { + // Oh, guess there is data in this Body Reader after all. + // The ContentLength field just wasn't set. + // Stich the Body back together again, re-attaching our + // consumed byte. + contentLen = -1 + body = io.MultiReader(bytes.NewReader(buf[:]), body) + } else { + // Body is actually empty. + body = nil + } + } + + cc.mu.Lock() + if cc.closed || !cc.canTakeNewRequestLocked() { + cc.mu.Unlock() + return nil, errClientConnUnusable + } + + cs := cc.newStream() + cs.req = req + hasBody := body != nil + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. 
See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, hdrs) + cc.wmu.Unlock() + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + return nil, werr + } + + var respHeaderTimer <-chan time.Time + var bodyCopyErrc chan error // result of body copy + if hasBody { + bodyCopyErrc = make(chan error, 1) + go func() { + bodyCopyErrc <- cs.writeRequestBody(body, req.Body) + }() + } else { + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + requestCanceledCh := requestCancel(req) + bodyWritten := false + + for { + select { + case re := <-readLoopResCh: + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + case <-respHeaderTimer: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errTimeout + case <-requestCanceledCh: + cc.forgetStreamID(cs.ID) + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + return nil, errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. 
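// A minimal caller-side sketch of the cancellation path handled in the select loop
// above: closing the Request.Cancel channel makes the Transport reset the stream and
// RoundTrip return errRequestCanceled. The client, URL, and timeout are placeholders.
func exampleCancel(c *http.Client) (*http.Response, error) {
	req, err := http.NewRequest("GET", "https://example.com/slow", nil)
	if err != nil {
		return nil, err
	}
	cancel := make(chan struct{})
	req.Cancel = cancel
	time.AfterFunc(2*time.Second, func() { close(cancel) }) // give up after two seconds
	return c.Do(req)
}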
+ return nil, cs.resetErr + case err := <-bodyCopyErrc: + if err != nil { + return nil, err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + frameSize := int(cc.maxFrameSize) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf) + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or opt-out? + // Use some heuristic on the body type? Nagel-like timers? + // Based on 'n'? Only last chunk of this for loop, unless flow control + // tokens are low? For now, always: + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + cc.wmu.Lock() + if !sentEnd { + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls = cc.encodeTrailers(req) + cc.mu.Unlock() + } + + // Avoid forgetting to send an END_STREAM if the encoded + // trailers are 0 bytes. Both results produce and END_STREAM. 
+ if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + cc.wmu.Unlock() + + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkReset(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) []byte { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + cc.writeHeader(":authority", host) + cc.writeHeader(":method", req.Method) + if req.Method != "CONNECT" { + cc.writeHeader(":path", req.URL.RequestURI()) + cc.writeHeader(":scheme", "https") + } + if trailers != "" { + cc.writeHeader("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + switch lowKey { + case "host", "content-length": + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + case "connection", "proxy-connection", "transfer-encoding", "upgrade": + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We deal with these earlier in + // RoundTrip, deciding whether they're + // error-worthy, but we don't want to mutate + // the user's *Request so at this point, just + // skip over them at this point. + continue + case "user-agent": + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + cc.writeHeader("accept-encoding", "gzip") + } + if !didUA { + cc.writeHeader("user-agent", defaultUserAgent) + } + return cc.hbuf.Bytes() +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. 
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { + cc.hbuf.Reset() + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filter at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes() +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. +func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + delete(cc.streams, id) + close(cs.done) + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + activeRes map[uint32]*clientStream // keyed by streamID + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{ + cc: cc, + activeRes: make(map[uint32]*clientStream), + } + + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
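// A minimal sketch of what shouldSendReqContentLength above decides for a few
// representative inputs; the calls only illustrate the rules in its body.
func exampleContentLengthRules() {
	_ = shouldSendReqContentLength("POST", 0)   // true: a zero-length POST still sends "content-length: 0"
	_ = shouldSendReqContentLength("GET", 0)    // false: a zero-length GET omits the header
	_ = shouldSendReqContentLength("GET", -1)   // false: an unknown length is never sent
	_ = shouldSendReqContentLength("PUT", 1024) // true: any positive length is always sent
}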
+ err := cc.readerErr + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + cc.mu.Lock() + for _, cs := range rl.activeRes { + cs.bufPipe.CloseWithError(err) + } + for _, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() + gotReply := false // ever saw a reply + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("Transport readFrame error: (%T) %v", err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + rl.endStreamError(cs, cc.fr.errDetail) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + if res.Body != noBody { + rl.activeRes[cs.ID] = cs + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. 
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + // Just skip 100-continue response headers for now. + // TODO: golang.org/issue/13851 for doing it properly. + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + if !streamEnded || cs.req.Method == "HEAD" { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded { + res.Body = noBody + return res, nil + } + + buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage + cs.bufPipe = pipe{b: buf} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(requestCancel(cs.req)) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. 
+type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = transportDefaultStreamFlow - v + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + if cs.bufPipe.Err() != io.EOF { + // TODO: write test for this + cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } + cs.bufPipe.BreakWithError(errClosedResponseBody) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + return nil + } + if data := f.Data(); len(data) > 0 { + if cs.bufPipe.b == nil { + // Data frame after it's already closed? + cc.logf("http2: Transport received DATA frame for closed stream; closing connection") + return ConnectionError(ErrCodeProtocol) + } + + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(len(data)) { + cs.inflow.take(int32(len(data))) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + cc.mu.Unlock() + + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. 
+ rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + cs.bufPipe.closeWithErrorAndCode(err, code) + delete(rl.activeRes, cs.ID) + if cs.req.Close || cs.req.Header.Get("Connection") == "close" { + rl.closeWhenIdle = true + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingInitialWindowSize: + // TODO: error if this is too large. + + // TODO: adjust flow control of still-open + // frames by the difference of the old initial + // window size and this one. + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := StreamError{cs.ID, f.ErrCode} + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl + } + delete(rl.activeRes, cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: do something with err? 
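// A minimal caller-side sketch of the trailer plumbing above: copyTrailers only fills
// in Response.Trailer once the stream has ended, so the body must be read to EOF before
// the trailers become visible. The client and URL are placeholders.
func exampleReadTrailers(c *http.Client) (http.Header, error) {
	res, err := c.Get("https://example.com/with-trailers")
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if _, err := io.Copy(ioutil.Discard, res.Body); err != nil { // drain to EOF first
		return nil, err
	}
	return res.Trailer, nil // populated only after the body is fully consumed
}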
send it as a debug frame to the peer? + // But that's only in GOAWAY. Invent a new frame type? Is there one already? + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 000000000000..0143b24cd3db --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,262 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "time" + + "golang.org/x/net/http2/hpack" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// endsStream reports whether the given frame writer w will locally +// close the stream. 
+func endsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. + panic("endsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +type writeSettings []Setting + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + if p.code != 0 { + ctx.Flush() // ignore error: we're hanging up on them anyway + time.Sleep(50 * time.Millisecond) + ctx.CloseConn() + } + return err +} + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. + endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. 
Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. + const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + endHeaders := len(headerBlock) == 0 + var err error + if first { + first = false + err = ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: endHeaders, + }) + } else { + err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) + } + if err != nil { + return err + } + } + return nil +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validHeaderFieldName(k) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !validHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 000000000000..c24316ce7b2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,283 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// frameWriteMsg is a request to write a frame. +type frameWriteMsg struct { + // write is the interface value that does the writing, once the + // writeScheduler (below) has decided to select this frame + // to write. The write functions are all defined in write.go. + write writeFramer + + stream *stream // used for prioritization. nil for non-stream frames. + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. 
+ done chan error +} + +// for debugging only: +func (wm frameWriteMsg) String() string { + var streamID uint32 + if wm.stream != nil { + streamID = wm.stream.id + } + var des string + if s, ok := wm.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wm.write) + } + return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) +} + +// writeScheduler tracks pending frames to write, priorities, and decides +// the next one to use. It is not thread-safe. +type writeScheduler struct { + // zero are frames not associated with a specific stream. + // They're sent before any stream-specific freams. + zero writeQueue + + // maxFrameSize is the maximum size of a DATA frame + // we'll write. Must be non-zero and between 16K-16M. + maxFrameSize uint32 + + // sq contains the stream-specific queues, keyed by stream ID. + // when a stream is idle, it's deleted from the map. + sq map[uint32]*writeQueue + + // canSend is a slice of memory that's reused between frame + // scheduling decisions to hold the list of writeQueues (from sq) + // which have enough flow control data to send. After canSend is + // built, the best is selected. + canSend []*writeQueue + + // pool of empty queues for reuse. + queuePool []*writeQueue +} + +func (ws *writeScheduler) putEmptyQueue(q *writeQueue) { + if len(q.s) != 0 { + panic("queue must be empty") + } + ws.queuePool = append(ws.queuePool, q) +} + +func (ws *writeScheduler) getEmptyQueue() *writeQueue { + ln := len(ws.queuePool) + if ln == 0 { + return new(writeQueue) + } + q := ws.queuePool[ln-1] + ws.queuePool = ws.queuePool[:ln-1] + return q +} + +func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } + +func (ws *writeScheduler) add(wm frameWriteMsg) { + st := wm.stream + if st == nil { + ws.zero.push(wm) + } else { + ws.streamQueue(st.id).push(wm) + } +} + +func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue { + if q, ok := ws.sq[streamID]; ok { + return q + } + if ws.sq == nil { + ws.sq = make(map[uint32]*writeQueue) + } + q := ws.getEmptyQueue() + ws.sq[streamID] = q + return q +} + +// take returns the most important frame to write and removes it from the scheduler. +// It is illegal to call this if the scheduler is empty or if there are no connection-level +// flow control bytes available. +func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) { + if ws.maxFrameSize == 0 { + panic("internal error: ws.maxFrameSize not initialized or invalid") + } + + // If there any frames not associated with streams, prefer those first. + // These are usually SETTINGS, etc. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + if len(ws.sq) == 0 { + return + } + + // Next, prioritize frames on streams that aren't DATA frames (no cost). + for id, q := range ws.sq { + if q.firstIsNoCost() { + return ws.takeFrom(id, q) + } + } + + // Now, all that remains are DATA frames with non-zero bytes to + // send. So pick the best one. + if len(ws.canSend) != 0 { + panic("should be empty") + } + for _, q := range ws.sq { + if n := ws.streamWritableBytes(q); n > 0 { + ws.canSend = append(ws.canSend, q) + } + } + if len(ws.canSend) == 0 { + return + } + defer ws.zeroCanSend() + + // TODO: find the best queue + q := ws.canSend[0] + + return ws.takeFrom(q.streamID(), q) +} + +// zeroCanSend is defered from take. 
+func (ws *writeScheduler) zeroCanSend() { + for i := range ws.canSend { + ws.canSend[i] = nil + } + ws.canSend = ws.canSend[:0] +} + +// streamWritableBytes returns the number of DATA bytes we could write +// from the given queue's stream, if this stream/queue were +// selected. It is an error to call this if q's head isn't a +// *writeData. +func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 { + wm := q.head() + ret := wm.stream.flow.available() // max we can write + if ret == 0 { + return 0 + } + if int32(ws.maxFrameSize) < ret { + ret = int32(ws.maxFrameSize) + } + if ret == 0 { + panic("internal error: ws.maxFrameSize not initialized or invalid") + } + wd := wm.write.(*writeData) + if len(wd.p) < int(ret) { + ret = int32(len(wd.p)) + } + return ret +} + +func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) { + wm = q.head() + // If the first item in this queue costs flow control tokens + // and we don't have enough, write as much as we can. + if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 { + allowed := wm.stream.flow.available() // max we can write + if allowed == 0 { + // No quota available. Caller can try the next stream. + return frameWriteMsg{}, false + } + if int32(ws.maxFrameSize) < allowed { + allowed = int32(ws.maxFrameSize) + } + // TODO: further restrict the allowed size, because even if + // the peer says it's okay to write 16MB data frames, we might + // want to write smaller ones to properly weight competing + // streams' priorities. + + if len(wd.p) > int(allowed) { + wm.stream.flow.take(allowed) + chunk := wd.p[:allowed] + wd.p = wd.p[allowed:] + // Make up a new write message of a valid size, rather + // than shifting one off the queue. + return frameWriteMsg{ + stream: wm.stream, + write: &writeData{ + streamID: wd.streamID, + p: chunk, + // even if the original had endStream set, there + // arebytes remaining because len(wd.p) > allowed, + // so we know endStream is false: + endStream: false, + }, + // our caller is blocking on the final DATA frame, not + // these intermediates, so no need to wait: + done: nil, + }, true + } + wm.stream.flow.take(int32(len(wd.p))) + } + + q.shift() + if q.empty() { + ws.putEmptyQueue(q) + delete(ws.sq, id) + } + return wm, true +} + +func (ws *writeScheduler) forgetStream(id uint32) { + q, ok := ws.sq[id] + if !ok { + return + } + delete(ws.sq, id) + + // But keep it for others later. + for i := range q.s { + q.s[i] = frameWriteMsg{} + } + q.s = q.s[:0] + ws.putEmptyQueue(q) +} + +type writeQueue struct { + s []frameWriteMsg +} + +// streamID returns the stream ID for a non-empty stream-specific queue. +func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id } + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wm frameWriteMsg) { + q.s = append(q.s, wm) +} + +// head returns the next item that would be removed by shift. +func (q *writeQueue) head() frameWriteMsg { + if len(q.s) == 0 { + panic("invalid use of queue") + } + return q.s[0] +} + +func (q *writeQueue) shift() frameWriteMsg { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wm := q.s[0] + // TODO: less copy-happy queue. 
+ copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = frameWriteMsg{} + q.s = q.s[:len(q.s)-1] + return wm +} + +func (q *writeQueue) firstIsNoCost() bool { + if df, ok := q.s[0].write.(*writeData); ok { + return len(df.p) == 0 + } + return true +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go index 4a554cb9bf69..8962c49d1deb 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go @@ -1,8 +1,8 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine appenginevm +// +build appengine // App Engine hooks. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go index 65dc347314d6..dc993efb5e10 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -14,6 +14,9 @@ import ( "golang.org/x/oauth2" ) +// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. +var appengineVM bool + // Set at init time by appengine_hook.go. If nil, we're not on App Engine. var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go index 2f9b15432fa8..4f42c8b34354 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -1,8 +1,8 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine appenginevm +// +build appengine package google diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go new file mode 100644 index 000000000000..633611cc3a01 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineVM = true + appengineTokenFunc = appengine.AccessToken +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go index 78f8089853f3..b952362977b5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go @@ -1,4 +1,4 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -50,7 +50,8 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. // 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine, it fetches credentials from the metadata server. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. // (In this final case any provided scopes are ignored.) // // For more details, see: @@ -84,7 +85,7 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc } // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil { + if appengineTokenFunc != nil && !appengineVM { return AppEngineTokenSource(ctx, scope...), nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go index 74aa7d91194c..0bed738668b6 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,7 +12,7 @@ // https://developers.google.com/accounts/docs/OAuth2 // and // https://developers.google.com/accounts/docs/application-default-credentials. -package google // import "golang.org/x/oauth2/google" +package google import ( "encoding/json" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000000..b91991786fdd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,71 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go index 01ba0ecb0084..d29a3bb9bbc5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go @@ -1,4 +1,4 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go index dc8ebfc4f76d..fbe1028d64e5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go index a17d79dd961b..739a89bfe96a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -91,25 +91,35 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { var brokenAuthHeaderProviders = []string{ "https://accounts.google.com/", - "https://www.googleapis.com/", - "https://github.com/", - "https://api.instagram.com/", - "https://www.douban.com/", "https://api.dropbox.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", "https://api.soundcloud.com/", - "https://www.linkedin.com/", "https://api.twitch.tv/", - "https://oauth.vk.com/", - "https://api.odnoklassniki.ru/", + "https://app.box.com/", "https://connect.stripe.com/", - "https://api.pushbullet.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", "https://oauth.sandbox.trainingpeaks.com/", "https://oauth.trainingpeaks.com/", - "https://www.strava.com/oauth/", - "https://app.box.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", "https://user.gini.net/", - "https://api.netatmo.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) } // providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go index 521e7b49e75b..f1f173e345db 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -33,6 +33,11 @@ func RegisterContextClientFunc(fn ContextClientFunc) { } func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } for _, fn := range contextClientFuncs { c, err := fn(ctx) if err != nil { @@ -42,9 +47,6 @@ func ContextClient(ctx context.Context) (*http.Client, error) { return c, nil } } - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } return http.DefaultClient, nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go index 362323c4e745..b46edb27c1a8 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go @@ -1,10 +1,10 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package jws provides encoding and decoding utilities for // signed JWS messages. -package jws // import "golang.org/x/oauth2/jws" +package jws import ( "bytes" @@ -27,8 +27,8 @@ type ClaimSet struct { Iss string `json:"iss"` // email address of the client_id of the application making the access token request Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion - Iat int64 `json:"iat"` // the time the assertion was issued. + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) Typ string `json:"typ,omitempty"` // token type (Optional). // Email for which the application is requesting delegated access (Optional). @@ -41,23 +41,22 @@ type ClaimSet struct { // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 // This array is marshalled using custom code (see (c *ClaimSet) encode()). PrivateClaims map[string]interface{} `json:"-"` - - exp time.Time - iat time.Time } func (c *ClaimSet) encode() (string, error) { - if c.exp.IsZero() || c.iat.IsZero() { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - c.iat = now - c.exp = now.Add(time.Hour) + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) } - - c.Exp = c.exp.Unix() - c.Iat = c.iat.Unix() b, err := json.Marshal(c) if err != nil { @@ -120,8 +119,11 @@ func Decode(payload string) (*ClaimSet, error) { return c, err } -// Encode encodes a signed JWS with provided header and claim set. 
-func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) { +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { head, err := header.encode() if err != nil { return "", err @@ -131,14 +133,22 @@ func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, err return "", err } ss := fmt.Sprintf("%s.%s", head, cs) - h := sha256.New() - h.Write([]byte(ss)) - b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil)) + sig, err := sg([]byte(ss)) if err != nil { return "", err } - sig := base64Encode(b) - return fmt.Sprintf("%s.%s", ss, sig), nil + return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write([]byte(data)) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) } // base64Encode returns and Base64url encoded version of the input string with any @@ -151,6 +161,8 @@ func base64Encode(b []byte) string { func base64Decode(s string) ([]byte, error) { // add back missing padding switch len(s) % 4 { + case 1: + s += "===" case 2: s += "==" case 3: diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go index 205d23ed4387..2ffad21a60db 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -54,6 +54,9 @@ type Config struct { // TokenURL is the endpoint required to complete the 2-legged JWT flow. TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration } // TokenSource returns a JWT TokenSource using the configuration @@ -95,6 +98,9 @@ func (js jwtSource) Token() (*oauth2.Token, error) { // to be compatible with legacy OAuth 2.0 providers. claimSet.Prn = subject } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } payload, err := jws.Encode(defaultHeader, claimSet, pk) if err != nil { return nil, err diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go index cca8b180309b..9b7b977dabdc 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go @@ -1,11 +1,11 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package oauth2 provides support for making // OAuth2 authorized and authenticated HTTP requests. // It can additionally grant authorization with Bearer JWT. -package oauth2 // import "golang.org/x/oauth2" +package oauth2 import ( "bytes" @@ -23,6 +23,18 @@ import ( // your own context.Context (see https://golang.org/x/net/context). var NoContext = context.TODO() +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. type Config struct { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go index ebbdddbdceb4..7a3167f15b04 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,6 +7,7 @@ package oauth2 import ( "net/http" "net/url" + "strconv" "strings" "time" @@ -92,14 +93,28 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. func (t *Token) Extra(key string) interface{} { - if vals, ok := t.raw.(url.Values); ok { - // TODO(jbd): Cast numeric values to int64 or float64. - return vals.Get(key) - } if raw, ok := t.raw.(map[string]interface{}); ok { return raw[key] } - return nil + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v } // expired reports whether the token is expired. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go index 90db088332b4..92ac7e2531f4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE new file mode 100644 index 000000000000..263aa7a0c123 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go new file mode 100644 index 000000000000..1356140472a6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "math/rand" + "time" +) + +type BackoffStrategy interface { + // Pause returns the duration of the next pause and true if the operation should be + // retried, or false if no further retries should be attempted. + Pause() (time.Duration, bool) + + // Reset restores the strategy to its initial state. + Reset() +} + +// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. +// The initial pause time is given by Base. +// Once the total pause time exceeds Max, Pause will indicate no further retries. +type ExponentialBackoff struct { + Base time.Duration + Max time.Duration + total time.Duration + n uint +} + +func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { + if eb.total > eb.Max { + return 0, false + } + + // The next pause is selected from randomly from [0, 2^n * Base). 
+ d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) + eb.total += d + eb.n++ + return d, true +} + +func (eb *ExponentialBackoff) Reset() { + eb.n = 0 + eb.total = 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go new file mode 100644 index 000000000000..4b8ec142441a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + + "google.golang.org/api/googleapi" +) + +// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +type ResumableBuffer struct { + media io.Reader + + chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. + err error // Any error generated when populating chunk by reading media. + + // The absolute position of chunk in the underlying media. + off int64 +} + +func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { + return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +} + +// Chunk returns the current buffered chunk, the offset in the underlying media +// from which the chunk is drawn, and the size of the chunk. +// Successive calls to Chunk return the same chunk between calls to Next. +func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { + // There may already be data in chunk if Next has not been called since the previous call to Chunk. + if rb.err == nil && len(rb.chunk) == 0 { + rb.err = rb.loadChunk() + } + return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err +} + +// loadChunk will read from media into chunk, up to the capacity of chunk. +func (rb *ResumableBuffer) loadChunk() error { + bufSize := cap(rb.chunk) + rb.chunk = rb.chunk[:bufSize] + + read := 0 + var err error + for err == nil && read < bufSize { + var n int + n, err = rb.media.Read(rb.chunk[read:]) + read += n + } + rb.chunk = rb.chunk[:read] + return err +} + +// Next advances to the next chunk, which will be returned by the next call to Chunk. +// Calls to Next without a corresponding prior call to Chunk will have no effect. +func (rb *ResumableBuffer) Next() { + rb.off += int64(len(rb.chunk)) + rb.chunk = rb.chunk[0:0] +} + +type readerTyper struct { + io.Reader + googleapi.ContentTyper +} + +// ReaderAtToReader adapts a ReaderAt to be used as a Reader. +// If ra implements googleapi.ContentTyper, then the returned reader +// will also implement googleapi.ContentTyper, delegating to ra. +func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { + r := io.NewSectionReader(ra, 0, size) + if typer, ok := ra.(googleapi.ContentTyper); ok { + return readerTyper{r, typer} + } + return r +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go new file mode 100644 index 000000000000..752c4b411b24 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. +// +// This package may be modified at any time without regard for backwards +// compatibility. It should not be used directly by API users. +package gensupport diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go new file mode 100644 index 000000000000..dd7bcd2eb079 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if: +// * it has a non-empty value, or +// * its field name is present in forceSendFields, and +// * it is not a nil pointer or nil interface. +// The JSON key for each selected field is taken from the field's json: struct tag. +func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { + if len(forceSendFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]struct{}) + for _, f := range forceSendFields { + mustInclude[f] = struct{}{} + } + + dataMap, err := schemaToMap(schema, mustInclude) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + if !includeField(v, f, mustInclude) { + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. 
+func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. + if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + _, ok := mustInclude[f.Name] + return ok || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go new file mode 100644 index 000000000000..817f46f5d274 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go @@ -0,0 +1,200 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/textproto" + + "google.golang.org/api/googleapi" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. 
+ err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// If the content type is already known, it can be specified via ctype. +// Otherwise, the content of media will be sniffed to determine the content type. +// If media implements googleapi.ContentTyper (deprecated), this will be used +// instead of sniffing the content. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { + // Note: callers could avoid calling DetectContentType if ctype != "", + // but doing the check inside this function reduces the amount of + // generated code. + if ctype != "" { + return media, ctype + } + + // For backwards compatability, allow clients to set content + // type by providing a ContentTyper for media. + if typer, ok := media.(googleapi.ContentTyper); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} + +type typeReader struct { + io.Reader + typ string +} + +// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body. +// Close must be called if reads from the multipartReader are abandoned before reaching EOF. 
+type multipartReader struct { + pr *io.PipeReader + pipeOpen bool + ctype string +} + +func newMultipartReader(parts []typeReader) *multipartReader { + mp := &multipartReader{pipeOpen: true} + var pw *io.PipeWriter + mp.pr, pw = io.Pipe() + mpw := multipart.NewWriter(pw) + mp.ctype = "multipart/related; boundary=" + mpw.Boundary() + go func() { + for _, part := range parts { + w, err := mpw.CreatePart(typeHeader(part.typ)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, part.Reader) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) + return + } + } + + mpw.Close() + pw.Close() + }() + return mp +} + +func (mp *multipartReader) Read(data []byte) (n int, err error) { + return mp.pr.Read(data) +} + +func (mp *multipartReader) Close() error { + if !mp.pipeOpen { + return nil + } + mp.pipeOpen = false + return mp.pr.Close() +} + +// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. +// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. +// +// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. +func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + mp := newMultipartReader([]typeReader{ + {body, bodyContentType}, + {media, mediaContentType}, + }) + return mp, mp.ctype +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + if contentType != "" { + h.Set("Content-Type", contentType) + } + return h +} + +// PrepareUpload determines whether the data in the supplied reader should be +// uploaded in a single request, or in sequential chunks. +// chunkSize is the size of the chunk that media should be split into. +// If chunkSize is non-zero and the contents of media do not fit in a single +// chunk (or there is an error reading media), then media will be returned as a +// ResumableBuffer. Otherwise, media will be returned as a Reader. +// +// After PrepareUpload has been called, media should no longer be used: the +// media content should be accessed via one of the return values. +func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, + *ResumableBuffer) { + if chunkSize == 0 { // do not chunk + return media, nil + } + + rb := NewResumableBuffer(media, chunkSize) + rdr, _, _, err := rb.Chunk() + + if err == io.EOF { // we can upload this in a single request + return rdr, nil + } + // err might be a non-EOF error. If it is, the next call to rb.Chunk will + // return the same error. Returning a ResumableBuffer ensures that this error + // will be handled at some point. + + return nil, rb +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go new file mode 100644 index 000000000000..3b3c743967ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
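A short usage sketch for CombineBodyMedia as documented above; the metadata JSON and media text are placeholders, not values taken from the diff:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"google.golang.org/api/gensupport"
)

func main() {
	body := strings.NewReader(`{"name": "greeting.txt"}`)
	media := strings.NewReader("hello, world")

	// The JSON metadata and the media content become the two parts of one
	// multipart/related body; the boundary is chosen by multipart.Writer.
	combined, ctype := gensupport.CombineBodyMedia(body, "application/json", media, "text/plain")
	defer combined.Close() // required if reads are abandoned before EOF

	b, err := ioutil.ReadAll(combined)
	if err != nil {
		panic(err)
	}
	fmt.Println(ctype)      // multipart/related; boundary=...
	fmt.Println(len(b) > 0) // true
}
```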
+ +package gensupport + +import ( + "net/url" + + "google.golang.org/api/googleapi" +) + +// URLParams is a simplified replacement for url.Values +// that safely builds up URL parameters for encoding. +type URLParams map[string][]string + +// Get returns the first value for the given key, or "". +func (u URLParams) Get(key string) string { + vs := u[key] + if len(vs) == 0 { + return "" + } + return vs[0] +} + +// Set sets the key to value. +// It replaces any existing values. +func (u URLParams) Set(key, value string) { + u[key] = []string{value} +} + +// SetMulti sets the key to an array of values. +// It replaces any existing values. +// Note that values must not be modified after calling SetMulti +// so the caller is responsible for making a copy if necessary. +func (u URLParams) SetMulti(key string, values []string) { + u[key] = values +} + +// Encode encodes the values into ``URL encoded'' form +// ("bar=baz&foo=quux") sorted by key. +func (u URLParams) Encode() string { + return url.Values(u).Encode() +} + +func SetOptions(u URLParams, opts ...googleapi.CallOption) { + for _, o := range opts { + u.Set(o.Get()) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go new file mode 100644 index 000000000000..b3e774aa497a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // statusResumeIncomplete is the code returned by the Google uploader + // when the transfer is not yet complete. + statusResumeIncomplete = 308 + + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 +) + +// ResumableUpload is used by the generated APIs to provide resumable uploads. +// It is not used by developers directly. +type ResumableUpload struct { + Client *http.Client + // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". + URI string + UserAgent string // User-Agent for header of the request + // Media is the object being uploaded. + Media *ResumableBuffer + // MediaType defines the media type, e.g. "image/jpeg". + MediaType string + + mu sync.Mutex // guards progress + progress int64 // number of bytes uploaded so far + + // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. + Callback func(int64) + + // If not specified, a default exponential backoff strategy will be used. + Backoff BackoffStrategy +} + +// Progress returns the number of bytes uploaded at this point. +func (rx *ResumableUpload) Progress() int64 { + rx.mu.Lock() + defer rx.mu.Unlock() + return rx.progress +} + +// doUploadRequest performs a single HTTP request to upload data. +// off specifies the offset in rx.Media from which data is drawn. +// size is the number of bytes in data. 
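The URLParams type above replaces the ad-hoc option maps previously used by the generated code. A minimal sketch of how it combines explicit parameters with CallOptions; the parameter values are illustrative:

```go
package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
	"google.golang.org/api/googleapi"
)

func main() {
	params := make(gensupport.URLParams)
	params.Set("alt", "json")
	params.SetMulti("prefix", []string{"logs/", "tmp/"})

	// CallOptions such as QuotaUser are folded into the same parameter map.
	gensupport.SetOptions(params, googleapi.QuotaUser("example-user"))

	// Encode sorts by key, like url.Values.Encode.
	fmt.Println(params.Encode())
	// alt=json&prefix=logs%2F&prefix=tmp%2F&quotaUser=example-user
}
```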
+// final specifies whether data is the final chunk to be uploaded. +func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { + req, err := http.NewRequest("POST", rx.URI, data) + if err != nil { + return nil, err + } + + req.ContentLength = size + var contentRange string + if final { + if size == 0 { + contentRange = fmt.Sprintf("bytes */%v", off) + } else { + contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) + } + } else { + contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) + } + req.Header.Set("Content-Range", contentRange) + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + return ctxhttp.Do(ctx, rx.Client, req) + +} + +// reportProgress calls a user-supplied callback to report upload progress. +// If old==updated, the callback is not called. +func (rx *ResumableUpload) reportProgress(old, updated int64) { + if updated-old == 0 { + return + } + rx.mu.Lock() + rx.progress = updated + rx.mu.Unlock() + if rx.Callback != nil { + rx.Callback(updated) + } +} + +// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. +func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { + chunk, off, size, err := rx.Media.Chunk() + + done := err == io.EOF + if !done && err != nil { + return nil, err + } + + res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) + if err != nil { + return res, err + } + + if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { + rx.reportProgress(off, off+int64(size)) + } + + if res.StatusCode == statusResumeIncomplete { + rx.Media.Next() + } + return res, nil +} + +func contextDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// Upload starts the process of a resumable upload with a cancellable context. +// It retries using the provided back off strategy until cancelled or the +// strategy indicates to stop retrying. +// It is called from the auto-generated API code and is not visible to the user. +// rx is private to the auto-generated API code. +// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { + var pause time.Duration + backoff := rx.Backoff + if backoff == nil { + backoff = DefaultBackoffStrategy() + } + + for { + // Ensure that we return in the case of cancelled context, even if pause is 0. + if contextDone(ctx) { + return nil, ctx.Err() + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(pause): + } + + resp, err = rx.transferChunk(ctx) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we should retry the request. + if shouldRetry(status, err) { + var retry bool + pause, retry = backoff.Pause() + if retry { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + continue + } + } + + // If the chunk was uploaded successfully, but there's still + // more to go, upload the next chunk without any delay. + if status == statusResumeIncomplete { + pause = 0 + backoff.Reset() + resp.Body.Close() + continue + } + + // It's possible for err and resp to both be non-nil here, but we expose a simpler + // contract to our callers: exactly one of resp and err will be non-nil. 
This means + // that any response body must be closed here before returning a non-nil error. + if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + + return resp, nil + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go new file mode 100644 index 000000000000..7f83d1da99fa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go @@ -0,0 +1,77 @@ +package gensupport + +import ( + "io" + "net" + "net/http" + "time" + + "golang.org/x/net/context" +) + +// Retry invokes the given function, retrying it multiple times if the connection failed or +// the HTTP status response indicates the request should be attempted again. ctx may be nil. +func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { + for { + resp, err := f() + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Return if we shouldn't retry. + pause, retry := backoff.Pause() + if !shouldRetry(status, err) || !retry { + return resp, err + } + + // Ensure the response body is closed, if any. + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + + // Pause, but still listen to ctx.Done if context is not nil. + var done <-chan struct{} + if ctx != nil { + done = ctx.Done() + } + select { + case <-done: + return nil, ctx.Err() + case <-time.After(pause): + } + } +} + +// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. +func DefaultBackoffStrategy() BackoffStrategy { + return &ExponentialBackoff{ + Base: 250 * time.Millisecond, + Max: 16 * time.Second, + } +} + +// shouldRetry returns true if the HTTP response / error indicates that the +// request should be attempted again. +func shouldRetry(status int, err error) bool { + // Retry for 5xx response codes. + if 500 <= status && status < 600 { + return true + } + + // Retry on statusTooManyRequests{ + if status == statusTooManyRequests { + return true + } + + // Retry on unexpected EOFs and temporary network errors. + if err == io.ErrUnexpectedEOF { + return true + } + if err, ok := err.(net.Error); ok { + return err.Temporary() + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go index 04d4424cea29..03e9acdd80ff 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go @@ -4,27 +4,18 @@ // Package googleapi contains the common code shared by all Google API // libraries. 
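A hedged sketch of how the new Retry helper and DefaultBackoffStrategy fit together; the target URL is illustrative only, and a real call would normally go through an authenticated client:

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/api/gensupport"
)

func main() {
	ctx := context.Background()

	// Retry re-issues the request on 5xx and 429 responses, unexpected EOFs and
	// temporary network errors, pausing between attempts per the backoff strategy
	// (250ms base, 16s cap for the default strategy).
	resp, err := gensupport.Retry(ctx, func() (*http.Response, error) {
		return http.Get("https://www.googleapis.com/storage/v1/b/example-bucket")
	}, gensupport.DefaultBackoffStrategy())
	if err != nil {
		fmt.Println("request failed after retries:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```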
-package googleapi // import "google.golang.org/api/googleapi" +package googleapi import ( "bytes" "encoding/json" - "errors" "fmt" "io" "io/ioutil" - "mime/multipart" "net/http" - "net/textproto" "net/url" - "regexp" - "strconv" "strings" - "sync" - "time" - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" "google.golang.org/api/googleapi/internal/uritemplates" ) @@ -56,14 +47,15 @@ type ServerResponse struct { const ( Version = "0.5" - // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete. - statusResumeIncomplete = 308 - // UserAgent is the header string used to identify this package. UserAgent = "google-api-go-client/" + Version - // uploadPause determines the delay between failed upload attempts - uploadPause = 1 * time.Second + // The default chunk size to use for resumable uplods if not specified by the user. + DefaultUploadChunkSize = 8 * 1024 * 1024 + + // The minimum chunk size that can be used for resumable uploads. All + // user-specified chunk sizes must be multiple of this value. + MinUploadChunkSize = 256 * 1024 ) // Error contains an error response from the server. @@ -189,52 +181,6 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { return buf, nil } -func getMediaType(media io.Reader) (io.Reader, string) { - if typer, ok := media.(ContentTyper); ok { - return media, typer.ContentType() - } - - pr, pw := io.Pipe() - typ := "application/octet-stream" - buf, err := ioutil.ReadAll(io.LimitReader(media, 512)) - if err != nil { - pw.CloseWithError(fmt.Errorf("error reading media: %v", err)) - return pr, typ - } - typ = http.DetectContentType(buf) - mr := io.MultiReader(bytes.NewReader(buf), media) - go func() { - _, err = io.Copy(pw, mr) - if err != nil { - pw.CloseWithError(fmt.Errorf("error reading media: %v", err)) - return - } - pw.Close() - }() - return pr, typ -} - -// DetectMediaType detects and returns the content type of the provided media. -// If the type can not be determined, "application/octet-stream" is returned. -func DetectMediaType(media io.ReaderAt) string { - if typer, ok := media.(ContentTyper); ok { - return typer.ContentType() - } - - typ := "application/octet-stream" - buf := make([]byte, 1024) - n, err := media.ReadAt(buf, 0) - buf = buf[:n] - if err == nil || err == io.EOF { - typ = http.DetectContentType(buf) - } - return typ -} - -type Lengther interface { - Len() int -} - // endingWithErrorReader from r until it returns an error. If the // final error from r is io.EOF and e is non-nil, e is used instead. type endingWithErrorReader struct { @@ -250,12 +196,6 @@ func (er endingWithErrorReader) Read(p []byte) (n int, err error) { return } -func typeHeader(contentType string) textproto.MIMEHeader { - h := make(textproto.MIMEHeader) - h.Set("Content-Type", contentType) - return h -} - // countingWriter counts the number of bytes it receives to write, but // discards them. type countingWriter struct { @@ -267,203 +207,65 @@ func (w countingWriter) Write(p []byte) (int, error) { return len(p), nil } -// ConditionallyIncludeMedia does nothing if media is nil. -// -// bodyp is an in/out parameter. It should initially point to the -// reader of the application/json (or whatever) payload to send in the -// API request. It's updated to point to the multipart body reader. -// -// ctypep is an in/out parameter. It should initially point to the -// content type of the bodyp, usually "application/json". 
It's updated -// to the "multipart/related" content type, with random boundary. -// -// The return value is the content-length of the entire multpart body. -func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) { - if media == nil { - return - } - // Get the media type, which might return a different reader instance. - var mediaType string - media, mediaType = getMediaType(media) - - body, bodyType := *bodyp, *ctypep - - pr, pw := io.Pipe() - mpw := multipart.NewWriter(pw) - *bodyp = pr - *ctypep = "multipart/related; boundary=" + mpw.Boundary() - go func() { - w, err := mpw.CreatePart(typeHeader(bodyType)) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err)) - return - } - _, err = io.Copy(w, body) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err)) - return - } - - w, err = mpw.CreatePart(typeHeader(mediaType)) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err)) - return - } - _, err = io.Copy(w, media) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err)) - return - } - mpw.Close() - pw.Close() - }() - cancel = func() { pw.CloseWithError(errAborted) } - return cancel, true -} - -var errAborted = errors.New("googleapi: upload aborted") - // ProgressUpdater is a function that is called upon every progress update of a resumable upload. // This is the only part of a resumable upload (from googleapi) that is usable by the developer. // The remaining usable pieces of resumable uploads is exposed in each auto-generated API. type ProgressUpdater func(current, total int64) -// ResumableUpload is used by the generated APIs to provide resumable uploads. -// It is not used by developers directly. -type ResumableUpload struct { - Client *http.Client - // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". - URI string - UserAgent string // User-Agent for header of the request - // Media is the object being uploaded. - Media io.ReaderAt - // MediaType defines the media type, e.g. "image/jpeg". - MediaType string - // ContentLength is the full size of the object being uploaded. - ContentLength int64 - - mu sync.Mutex // guards progress - progress int64 // number of bytes uploaded so far - started bool // whether the upload has been started - - // Callback is an optional function that will be called upon every progress update. - Callback ProgressUpdater +type MediaOption interface { + setOptions(o *MediaOptions) } -var ( - // rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded. - rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`) - // chunkSize is the size of the chunks created during a resumable upload and should be a power of two. - // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. - chunkSize int64 = 1 << 18 -) +type contentTypeOption string -// Progress returns the number of bytes uploaded at this point. 
-func (rx *ResumableUpload) Progress() int64 { - rx.mu.Lock() - defer rx.mu.Unlock() - return rx.progress +func (ct contentTypeOption) setOptions(o *MediaOptions) { + o.ContentType = string(ct) + if o.ContentType == "" { + o.ForceEmptyContentType = true + } } -func (rx *ResumableUpload) transferStatus(ctx context.Context) (int64, *http.Response, error) { - req, _ := http.NewRequest("POST", rx.URI, nil) - req.ContentLength = 0 - req.Header.Set("User-Agent", rx.UserAgent) - req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength)) - res, err := ctxhttp.Do(ctx, rx.Client, req) - if err != nil || res.StatusCode != statusResumeIncomplete { - return 0, res, err - } - var start int64 - if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 { - start, err = strconv.ParseInt(m[1], 10, 64) - if err != nil { - return 0, nil, fmt.Errorf("unable to parse range size %v", m[1]) - } - start += 1 // Start at the next byte +// ContentType returns a MediaOption which sets the Content-Type header for media uploads. +// If ctype is empty, the Content-Type header will be omitted. +func ContentType(ctype string) MediaOption { + return contentTypeOption(ctype) +} + +type chunkSizeOption int + +func (cs chunkSizeOption) setOptions(o *MediaOptions) { + size := int(cs) + if size%MinUploadChunkSize != 0 { + size += MinUploadChunkSize - (size % MinUploadChunkSize) } - return start, res, nil + o.ChunkSize = size } -type chunk struct { - body io.Reader - size int64 - err error +// ChunkSize returns a MediaOption which sets the chunk size for media uploads. +// size will be rounded up to the nearest multiple of 256K. +// Media which contains fewer than size bytes will be uploaded in a single request. +// Media which contains size bytes or more will be uploaded in separate chunks. +// If size is zero, media will be uploaded in a single request. +func ChunkSize(size int) MediaOption { + return chunkSizeOption(size) } -func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) { - var start int64 - var err error - res := &http.Response{} - if rx.started { - start, res, err = rx.transferStatus(ctx) - if err != nil || res.StatusCode != statusResumeIncomplete { - return res, err - } - } - rx.started = true - - for { - select { // Check for cancellation - case <-ctx.Done(): - res.StatusCode = http.StatusRequestTimeout - return res, ctx.Err() - default: - } - reqSize := rx.ContentLength - start - if reqSize > chunkSize { - reqSize = chunkSize - } - r := io.NewSectionReader(rx.Media, start, reqSize) - req, _ := http.NewRequest("POST", rx.URI, r) - req.ContentLength = reqSize - req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength)) - req.Header.Set("Content-Type", rx.MediaType) - req.Header.Set("User-Agent", rx.UserAgent) - res, err = ctxhttp.Do(ctx, rx.Client, req) - start += reqSize - if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) { - rx.mu.Lock() - rx.progress = start // keep track of number of bytes sent so far - rx.mu.Unlock() - if rx.Callback != nil { - rx.Callback(start, rx.ContentLength) - } - } - if err != nil || res.StatusCode != statusResumeIncomplete { - break - } - } - return res, err +// MediaOptions stores options for customizing media upload. It is not used by developers directly. 
+type MediaOptions struct { + ContentType string + ForceEmptyContentType bool + + ChunkSize int } -var sleep = time.Sleep // override in unit tests - -// Upload starts the process of a resumable upload with a cancellable context. -// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled. -// It is called from the auto-generated API code and is not visible to the user. -// rx is private to the auto-generated API code. -func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) { - var res *http.Response - var err error - for { - res, err = rx.transferChunks(ctx) - if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK { - return res, err - } - select { // Check for cancellation - case <-ctx.Done(): - res.StatusCode = http.StatusRequestTimeout - return res, ctx.Err() - default: - } - sleep(uploadPause) +// ProcessMediaOptions stores options from opts in a MediaOptions. +// It is not used by developers directly. +func ProcessMediaOptions(opts []MediaOption) *MediaOptions { + mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} + for _, o := range opts { + o.setOptions(mo) } - return res, err + return mo } func ResolveRelative(basestr, relstr string) string { @@ -590,3 +392,33 @@ func CombineFields(s []Field) string { } return strings.Join(r, ",") } + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. +type CallOption interface { + Get() (key, value string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. +// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. +func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// TODO: Fields too diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go index 8a84813fe52e..7c103ba1386d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -2,26 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Package uritemplates is a level 3 implementation of RFC 6570 (URI // Template, http://tools.ietf.org/html/rfc6570). 
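A small sketch of the MediaOption and CallOption plumbing introduced above, showing the chunk-size rounding described in the ChunkSize comment; the option values are illustrative:

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	// ProcessMediaOptions starts from the 8 MiB default chunk size and applies the
	// options in order; ChunkSize is rounded up to a multiple of MinUploadChunkSize.
	opts := googleapi.ProcessMediaOptions([]googleapi.MediaOption{
		googleapi.ContentType("application/octet-stream"),
		googleapi.ChunkSize(300 * 1024), // rounded up to 512 KiB
	})
	fmt.Println(opts.ContentType) // application/octet-stream
	fmt.Println(opts.ChunkSize)   // 524288

	// CallOptions are simple key/value pairs appended to the request query string.
	key, value := googleapi.QuotaUser("example-user").Get()
	fmt.Println(key, value) // quotaUser example-user
}
```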
-// -// To use uritemplates, parse a template string and expand it with a value -// map: -// -// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") -// values := make(map[string]interface{}) -// values["user"] = "jtacoma" -// values["repo"] = "uritemplates" -// expanded, _ := template.ExpandString(values) -// fmt.Printf(expanded) -// +// uritemplates does not support composite values (in Go: slices or maps) +// and so does not qualify as a level 4 implementation. package uritemplates import ( "bytes" "errors" - "fmt" - "reflect" "regexp" "strconv" "strings" @@ -45,52 +34,47 @@ func pctEncode(src []byte) []byte { return dst } -func escape(s string, allowReserved bool) (escaped string) { +func escape(s string, allowReserved bool) string { if allowReserved { - escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) - } else { - escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) } - return escaped + return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) } -// A UriTemplate is a parsed representation of a URI template. -type UriTemplate struct { +// A uriTemplate is a parsed representation of a URI template. +type uriTemplate struct { raw string parts []templatePart } -// Parse parses a URI template string into a UriTemplate object. -func Parse(rawtemplate string) (template *UriTemplate, err error) { - template = new(UriTemplate) - template.raw = rawtemplate - split := strings.Split(rawtemplate, "{") - template.parts = make([]templatePart, len(split)*2-1) +// parse parses a URI template string into a uriTemplate object. +func parse(rawTemplate string) (*uriTemplate, error) { + split := strings.Split(rawTemplate, "{") + parts := make([]templatePart, len(split)*2-1) for i, s := range split { if i == 0 { if strings.Contains(s, "}") { - err = errors.New("unexpected }") - break - } - template.parts[i].raw = s - } else { - subsplit := strings.Split(s, "}") - if len(subsplit) != 2 { - err = errors.New("malformed template") - break + return nil, errors.New("unexpected }") } - expression := subsplit[0] - template.parts[i*2-1], err = parseExpression(expression) - if err != nil { - break - } - template.parts[i*2].raw = subsplit[1] + parts[i].raw = s + continue } + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + return nil, errors.New("malformed template") + } + expression := subsplit[0] + var err error + parts[i*2-1], err = parseExpression(expression) + if err != nil { + return nil, err + } + parts[i*2].raw = subsplit[1] } - if err != nil { - template = nil - } - return template, err + return &uriTemplate{ + raw: rawTemplate, + parts: parts, + }, nil } type templatePart struct { @@ -160,6 +144,8 @@ func parseExpression(expression string) (result templatePart, err error) { } func parseTerm(term string) (result templateTerm, err error) { + // TODO(djd): Remove "*" suffix parsing once we check that no APIs have + // mistakenly used that attribute. if strings.HasSuffix(term, "*") { result.explode = true term = term[:len(term)-1] @@ -185,175 +171,50 @@ func parseTerm(term string) (result templateTerm, err error) { } // Expand expands a URI template with a set of values to produce a string. 
-func (self *UriTemplate) Expand(value interface{}) (string, error) { - values, ismap := value.(map[string]interface{}) - if !ismap { - if m, ismap := struct2map(value); !ismap { - return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") - } else { - return self.Expand(m) - } - } +func (t *uriTemplate) Expand(values map[string]string) string { var buf bytes.Buffer - for _, p := range self.parts { - err := p.expand(&buf, values) - if err != nil { - return "", err - } + for _, p := range t.parts { + p.expand(&buf, values) } - return buf.String(), nil + return buf.String() } -func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { - if len(self.raw) > 0 { - buf.WriteString(self.raw) - return nil +func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { + if len(tp.raw) > 0 { + buf.WriteString(tp.raw) + return } - var zeroLen = buf.Len() - buf.WriteString(self.first) - var firstLen = buf.Len() - for _, term := range self.terms { + var first = true + for _, term := range tp.terms { value, exists := values[term.name] if !exists { continue } - if buf.Len() != firstLen { - buf.WriteString(self.sep) - } - switch v := value.(type) { - case string: - self.expandString(buf, term, v) - case []interface{}: - self.expandArray(buf, term, v) - case map[string]interface{}: - if term.truncate > 0 { - return errors.New("cannot truncate a map expansion") - } - self.expandMap(buf, term, v) - default: - if m, ismap := struct2map(value); ismap { - if term.truncate > 0 { - return errors.New("cannot truncate a map expansion") - } - self.expandMap(buf, term, m) - } else { - str := fmt.Sprintf("%v", value) - self.expandString(buf, term, str) - } + if first { + buf.WriteString(tp.first) + first = false + } else { + buf.WriteString(tp.sep) } + tp.expandString(buf, term, value) } - if buf.Len() == firstLen { - original := buf.Bytes()[:zeroLen] - buf.Reset() - buf.Write(original) - } - return nil } -func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { - if self.named { +func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if tp.named { buf.WriteString(name) if empty { - buf.WriteString(self.ifemp) + buf.WriteString(tp.ifemp) } else { buf.WriteString("=") } } } -func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { +func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { if len(s) > t.truncate && t.truncate > 0 { s = s[:t.truncate] } - self.expandName(buf, t.name, len(s) == 0) - buf.WriteString(escape(s, self.allowReserved)) -} - -func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { - if len(a) == 0 { - return - } else if !t.explode { - self.expandName(buf, t.name, false) - } - for i, value := range a { - if t.explode && i > 0 { - buf.WriteString(self.sep) - } else if i > 0 { - buf.WriteString(",") - } - var s string - switch v := value.(type) { - case string: - s = v - default: - s = fmt.Sprintf("%v", v) - } - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - if self.named && t.explode { - self.expandName(buf, t.name, len(s) == 0) - } - buf.WriteString(escape(s, self.allowReserved)) - } -} - -func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { - if len(m) == 0 { - return - } - if !t.explode { - self.expandName(buf, t.name, len(m) == 0) - } - var firstLen = buf.Len() - for k, value := range m { - if firstLen != 
buf.Len() { - if t.explode { - buf.WriteString(self.sep) - } else { - buf.WriteString(",") - } - } - var s string - switch v := value.(type) { - case string: - s = v - default: - s = fmt.Sprintf("%v", v) - } - if t.explode { - buf.WriteString(escape(k, self.allowReserved)) - buf.WriteRune('=') - buf.WriteString(escape(s, self.allowReserved)) - } else { - buf.WriteString(escape(k, self.allowReserved)) - buf.WriteRune(',') - buf.WriteString(escape(s, self.allowReserved)) - } - } -} - -func struct2map(v interface{}) (map[string]interface{}, bool) { - value := reflect.ValueOf(v) - switch value.Type().Kind() { - case reflect.Ptr: - return struct2map(value.Elem().Interface()) - case reflect.Struct: - m := make(map[string]interface{}) - for i := 0; i < value.NumField(); i++ { - tag := value.Type().Field(i).Tag - var name string - if strings.Contains(string(tag), ":") { - name = tag.Get("uri") - } else { - name = strings.TrimSpace(string(tag)) - } - if len(name) == 0 { - name = value.Type().Field(i).Name - } - m[name] = value.Field(i).Interface() - } - return m, true - } - return nil, false + tp.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, tp.allowReserved)) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go index 399ef4623698..eff260a6925f 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -1,13 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package uritemplates -func Expand(path string, expansions map[string]string) (string, error) { - template, err := Parse(path) +func Expand(path string, values map[string]string) (string, error) { + template, err := parse(path) if err != nil { return "", err } - values := make(map[string]interface{}) - for k, v := range expansions { - values[k] = v - } - return template.Expand(values) + return template.Expand(values), nil } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json index d97a4687c96a..3768b46877ad 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -1,13 +1,13 @@ { "kind": "discovery#restDescription", - "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/N6tKJW3LYMQm0Iy7CDmwkEgtf6o\"", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/KVPQfwGxQTBtH0g1kuij0C9i4uc\"", "discoveryVersion": "v1", "id": "storage:v1", "name": "storage", "version": "v1", - "revision": "20151007", + "revision": "20160304", "title": "Cloud Storage JSON API", - "description": "Lets you store and retrieve potentially-large, immutable data objects.", + "description": "Stores and retrieves potentially large, immutable data objects.", "ownerDomain": "google.com", "ownerName": "Google", "icons": { @@ -574,18 +574,26 @@ }, "contentType": { "type": "string", - "description": "Content-Type of the object data.", - "annotations": { - "required": [ - "storage.objects.insert", - "storage.objects.update" - ] - } + "description": "Content-Type of the object data." }, "crc32c": { "type": "string", "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices." }, + "customerEncryption": { + "type": "object", + "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", + "properties": { + "encryptionAlgorithm": { + "type": "string", + "description": "The encryption algorithm." + }, + "keySha256": { + "type": "string", + "description": "SHA256 hash value of the encryption key." + } + } + }, "etag": { "type": "string", "description": "HTTP 1.1 Entity tag for the object." 
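A sketch of the simplified template expansion that the generated storage client relies on (see the googleapi.Expand calls later in this diff). The base URL assumes the default storage BasePath, and the bucket name is illustrative:

```go
package main

import (
	"fmt"
	"net/url"

	"google.golang.org/api/googleapi"
)

func main() {
	// The generated storage client expands templates such as "b/{bucket}/acl/{entity}"
	// with plain string values only, which is why the slimmed-down uritemplates
	// package no longer needs composite (slice/map) expansion.
	u, err := url.Parse("https://www.googleapis.com/storage/v1/b/{bucket}/acl/{entity}")
	if err != nil {
		panic(err)
	}
	googleapi.Expand(u, map[string]string{
		"bucket": "example-bucket",
		"entity": "allUsers",
	})
	fmt.Println(u.String())
	// https://www.googleapis.com/storage/v1/b/example-bucket/acl/allUsers
}
```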
@@ -855,6 +863,7 @@ "entity" ], "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -885,6 +894,7 @@ "$ref": "BucketAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -911,6 +921,7 @@ "$ref": "BucketAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -934,6 +945,7 @@ "$ref": "BucketAccessControls" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -967,6 +979,7 @@ "$ref": "BucketAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1000,6 +1013,7 @@ "$ref": "BucketAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] } @@ -1466,6 +1480,7 @@ "entity" ], "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1496,6 +1511,7 @@ "$ref": "ObjectAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1522,6 +1538,7 @@ "$ref": "ObjectAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1557,6 +1574,7 @@ "$ref": "ObjectAccessControls" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1590,6 +1608,7 @@ "$ref": "ObjectAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1623,6 +1642,7 @@ "$ref": "ObjectAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] } @@ -1667,6 +1687,7 @@ "entity" ], "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1710,6 +1731,7 @@ "$ref": "ObjectAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1749,6 +1771,7 @@ "$ref": "ObjectAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1785,6 +1808,7 @@ "$ref": "ObjectAccessControls" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1831,6 +1855,7 @@ "$ref": "ObjectAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] }, @@ -1877,6 +1902,7 @@ "$ref": "ObjectAccessControl" }, "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" ] } @@ -1951,7 +1977,8 @@ "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ], - "supportsMediaDownload": true + "supportsMediaDownload": true, + "useMediaDownloadService": true }, "copy": { "id": "storage.objects.copy", @@ -2089,7 +2116,8 @@ 
"https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ], - "supportsMediaDownload": true + "supportsMediaDownload": true, + "useMediaDownloadService": true }, "delete": { "id": "storage.objects.delete", @@ -2226,7 +2254,8 @@ "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" ], - "supportsMediaDownload": true + "supportsMediaDownload": true, + "useMediaDownloadService": true }, "insert": { "id": "storage.objects.insert", @@ -2324,6 +2353,7 @@ "https://www.googleapis.com/auth/devstorage.read_write" ], "supportsMediaDownload": true, + "useMediaDownloadService": true, "supportsMediaUpload": true, "mediaUpload": { "accept": [ @@ -2754,7 +2784,8 @@ "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_write" ], - "supportsMediaDownload": true + "supportsMediaDownload": true, + "useMediaDownloadService": true }, "watchAll": { "id": "storage.objects.watchAll", diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go index f690ddfa86f7..a299044395b2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -7,17 +7,17 @@ // import "google.golang.org/api/storage/v1" // ... // storageService, err := storage.New(oauthHttpClient) -package storage // import "google.golang.org/api/storage/v1" +package storage import ( "bytes" "encoding/json" "errors" "fmt" - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" - "google.golang.org/api/googleapi" - "google.golang.org/api/internal" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" "io" "net/http" "net/url" @@ -33,10 +33,10 @@ var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse +var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace -var _ = internal.MarshalJSON var _ = context.Canceled var _ = ctxhttp.Do @@ -244,7 +244,7 @@ type Bucket struct { func (s *Bucket) MarshalJSON() ([]byte, error) { type noMethod Bucket raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } type BucketCors struct { @@ -279,7 +279,7 @@ type BucketCors struct { func (s *BucketCors) MarshalJSON() ([]byte, error) { type noMethod BucketCors raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketLifecycle: The bucket's lifecycle configuration. 
See lifecycle @@ -301,7 +301,7 @@ type BucketLifecycle struct { func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { type noMethod BucketLifecycle raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } type BucketLifecycleRule struct { @@ -323,7 +323,7 @@ type BucketLifecycleRule struct { func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { type noMethod BucketLifecycleRule raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketLifecycleRuleAction: The action to take. @@ -343,7 +343,7 @@ type BucketLifecycleRuleAction struct { func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { type noMethod BucketLifecycleRuleAction raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketLifecycleRuleCondition: The condition(s) under which the action @@ -380,7 +380,7 @@ type BucketLifecycleRuleCondition struct { func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { type noMethod BucketLifecycleRuleCondition raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketLogging: The bucket's logging configuration, which defines the @@ -406,7 +406,7 @@ type BucketLogging struct { func (s *BucketLogging) MarshalJSON() ([]byte, error) { type noMethod BucketLogging raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketOwner: The owner of the bucket. This is always the project @@ -430,7 +430,7 @@ type BucketOwner struct { func (s *BucketOwner) MarshalJSON() ([]byte, error) { type noMethod BucketOwner raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketVersioning: The bucket's versioning configuration. @@ -451,7 +451,7 @@ type BucketVersioning struct { func (s *BucketVersioning) MarshalJSON() ([]byte, error) { type noMethod BucketVersioning raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketWebsite: The bucket's website configuration. @@ -476,7 +476,7 @@ type BucketWebsite struct { func (s *BucketWebsite) MarshalJSON() ([]byte, error) { type noMethod BucketWebsite raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketAccessControl: An access-control entry. @@ -546,7 +546,7 @@ type BucketAccessControl struct { func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { type noMethod BucketAccessControl raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketAccessControlProjectTeam: The project team associated with the @@ -570,7 +570,7 @@ type BucketAccessControlProjectTeam struct { func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type noMethod BucketAccessControlProjectTeam raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // BucketAccessControls: An access-control list. 
@@ -598,7 +598,7 @@ type BucketAccessControls struct { func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { type noMethod BucketAccessControls raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // Buckets: A list of buckets. @@ -631,7 +631,7 @@ type Buckets struct { func (s *Buckets) MarshalJSON() ([]byte, error) { type noMethod Buckets raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // Channel: An notification channel used to watch for resource changes. @@ -689,7 +689,7 @@ type Channel struct { func (s *Channel) MarshalJSON() ([]byte, error) { type noMethod Channel raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // ComposeRequest: A Compose request. @@ -716,7 +716,7 @@ type ComposeRequest struct { func (s *ComposeRequest) MarshalJSON() ([]byte, error) { type noMethod ComposeRequest raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } type ComposeRequestSourceObjects struct { @@ -743,7 +743,7 @@ type ComposeRequestSourceObjects struct { func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { type noMethod ComposeRequestSourceObjects raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // ComposeRequestSourceObjectsObjectPreconditions: Conditions that must @@ -767,7 +767,7 @@ type ComposeRequestSourceObjectsObjectPreconditions struct { func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { type noMethod ComposeRequestSourceObjectsObjectPreconditions raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // Object: An object. @@ -803,6 +803,10 @@ type Object struct { // Practices. Crc32c string `json:"crc32c,omitempty"` + // CustomerEncryption: Metadata of customer-supplied encryption key, if + // the object is encrypted by such a key. + CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` + // Etag: HTTP 1.1 Entity tag for the object. Etag string `json:"etag,omitempty"` @@ -879,7 +883,31 @@ type Object struct { func (s *Object) MarshalJSON() ([]byte, error) { type noMethod Object raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ObjectCustomerEncryption: Metadata of customer-supplied encryption +// key, if the object is encrypted by such a key. +type ObjectCustomerEncryption struct { + // EncryptionAlgorithm: The encryption algorithm. + EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + + // KeySha256: SHA256 hash value of the encryption key. + KeySha256 string `json:"keySha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { + type noMethod ObjectCustomerEncryption + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // ObjectOwner: The owner of the object. This will always be the @@ -903,7 +931,7 @@ type ObjectOwner struct { func (s *ObjectOwner) MarshalJSON() ([]byte, error) { type noMethod ObjectOwner raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // ObjectAccessControl: An access-control entry. @@ -978,7 +1006,7 @@ type ObjectAccessControl struct { func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { type noMethod ObjectAccessControl raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // ObjectAccessControlProjectTeam: The project team associated with the @@ -1002,7 +1030,7 @@ type ObjectAccessControlProjectTeam struct { func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { type noMethod ObjectAccessControlProjectTeam raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // ObjectAccessControls: An access-control list. @@ -1030,7 +1058,7 @@ type ObjectAccessControls struct { func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { type noMethod ObjectAccessControls raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // Objects: A list of objects. @@ -1067,7 +1095,7 @@ type Objects struct { func (s *Objects) MarshalJSON() ([]byte, error) { type noMethod Objects raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // RewriteResponse: A rewrite response. @@ -1114,39 +1142,39 @@ type RewriteResponse struct { func (s *RewriteResponse) MarshalJSON() ([]byte, error) { type noMethod RewriteResponse raw := noMethod(*s) - return internal.MarshalJSON(raw, s.ForceSendFields) + return gensupport.MarshalJSON(raw, s.ForceSendFields) } // method id "storage.bucketAccessControls.delete": type BucketAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context } // Delete: Permanently deletes the ACL entry for the specified entity on // the specified bucket. func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { - c := &BucketAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. 
-// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { c.ctx_ = ctx return c @@ -1154,13 +1182,9 @@ func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAcc func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -1174,7 +1198,8 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.bucketAccessControls.delete" call. -func (c *BucketAccessControlsDeleteCall) Do() error { +func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err @@ -1208,6 +1233,7 @@ func (c *BucketAccessControlsDeleteCall) Do() error { // }, // "path": "b/{bucket}/acl/{entity}", // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -1217,27 +1243,28 @@ func (c *BucketAccessControlsDeleteCall) Do() error { // method id "storage.bucketAccessControls.get": type BucketAccessControlsGetCall struct { - s *Service - bucket string - entity string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // Get: Returns the ACL entry for the specified entity on the specified // bucket. func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { - c := &BucketAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -1247,13 +1274,13 @@ func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccess // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. 
+// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { c.ctx_ = ctx return c @@ -1261,21 +1288,17 @@ func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccess func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -1290,7 +1313,8 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsGetCall) Do() (*BucketAccessControl, error) { +func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -1345,6 +1369,7 @@ func (c *BucketAccessControlsGetCall) Do() (*BucketAccessControl, error) { // "$ref": "BucketAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -1357,29 +1382,29 @@ type BucketAccessControlsInsertCall struct { s *Service bucket string bucketaccesscontrol *BucketAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Insert: Creates a new ACL entry on the specified bucket. func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { - c := &BucketAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.bucketaccesscontrol = bucketaccesscontrol return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { c.ctx_ = ctx return c @@ -1392,13 +1417,9 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -1418,7 +1439,8 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsInsertCall) Do() (*BucketAccessControl, error) { +func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -1469,6 +1491,7 @@ func (c *BucketAccessControlsInsertCall) Do() (*BucketAccessControl, error) { // "$ref": "BucketAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -1478,24 +1501,25 @@ func (c *BucketAccessControlsInsertCall) Do() (*BucketAccessControl, error) { // method id "storage.bucketAccessControls.list": type BucketAccessControlsListCall struct { - s *Service - bucket string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // List: Retrieves ACL entries on the specified bucket. func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { - c := &BucketAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -1505,13 +1529,13 @@ func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAcces // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { c.ctx_ = ctx return c @@ -1519,20 +1543,16 @@ func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAcces func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -1547,7 +1567,8 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsListCall) Do() (*BucketAccessControls, error) { +func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -1595,6 +1616,7 @@ func (c *BucketAccessControlsListCall) Do() (*BucketAccessControls, error) { // "$ref": "BucketAccessControls" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -1608,31 +1630,31 @@ type BucketAccessControlsPatchCall struct { bucket string entity string bucketaccesscontrol *BucketAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Patch: Updates an ACL entry on the specified bucket. This method // supports patch semantics. func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { - c := &BucketAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity c.bucketaccesscontrol = bucketaccesscontrol return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { c.ctx_ = ctx return c @@ -1645,13 +1667,9 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -1672,7 +1690,8 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsPatchCall) Do() (*BucketAccessControl, error) { +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -1730,6 +1749,7 @@ func (c *BucketAccessControlsPatchCall) Do() (*BucketAccessControl, error) { // "$ref": "BucketAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -1743,30 +1763,30 @@ type BucketAccessControlsUpdateCall struct { bucket string entity string bucketaccesscontrol *BucketAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Update: Updates an ACL entry on the specified bucket. func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { - c := &BucketAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity c.bucketaccesscontrol = bucketaccesscontrol return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
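// The hunks above replace the old opt_ map with gensupport.URLParams and give
// Do a variadic googleapi.CallOption parameter. The following caller-side
// sketch shows that migrated pattern; it is illustrative only, assuming the
// package is imported as storage ("google.golang.org/api/storage/v1"),
// googleapi is "google.golang.org/api/googleapi", svc is a *storage.Service
// built elsewhere (for example via storage.New with an authenticated
// *http.Client), and the bucket, entity, and quota-user strings are placeholders.
func patchBucketACLExample(svc *storage.Service) (*storage.BucketAccessControl, error) {
	acl := &storage.BucketAccessControl{Entity: "user-reader@example.com", Role: "READER"}
	return svc.BucketAccessControls.Patch("example-bucket", "user-reader@example.com", acl).
		Fields("entity", "role").               // stored in urlParams_ as the "fields" query parameter
		Do(googleapi.QuotaUser("example-user")) // per-call option applied via gensupport.SetOptions
}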
func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { c.ctx_ = ctx return c @@ -1779,13 +1799,9 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -1806,7 +1822,8 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsUpdateCall) Do() (*BucketAccessControl, error) { +func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -1864,6 +1881,7 @@ func (c *BucketAccessControlsUpdateCall) Do() (*BucketAccessControl, error) { // "$ref": "BucketAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -1873,15 +1891,15 @@ func (c *BucketAccessControlsUpdateCall) Do() (*BucketAccessControl, error) { // method id "storage.buckets.delete": type BucketsDeleteCall struct { - s *Service - bucket string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context } // Delete: Permanently deletes an empty bucket. func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { - c := &BucketsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } @@ -1890,7 +1908,7 @@ func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { // "ifMetagenerationMatch": If set, only deletes the bucket if its // metageneration matches this value. func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -1898,21 +1916,21 @@ func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) * // "ifMetagenerationNotMatch": If set, only deletes the bucket if its // metageneration does not match this value. func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { c.ctx_ = ctx return c @@ -1920,19 +1938,9 @@ func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -1945,7 +1953,8 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.buckets.delete" call. -func (c *BucketsDeleteCall) Do() error { +func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err @@ -1995,15 +2004,16 @@ func (c *BucketsDeleteCall) Do() error { // method id "storage.buckets.get": type BucketsGetCall struct { - s *Service - bucket string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // Get: Returns metadata for the specified bucket. func (r *BucketsService) Get(bucket string) *BucketsGetCall { - c := &BucketsGetCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } @@ -2013,7 +2023,7 @@ func (r *BucketsService) Get(bucket string) *BucketsGetCall { // conditional on whether the bucket's current metageneration matches // the given value. func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -2022,7 +2032,7 @@ func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *Buc // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -2033,15 +2043,15 @@ func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. 
func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2051,13 +2061,13 @@ func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { c.ctx_ = ctx return c @@ -2065,29 +2075,16 @@ func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -2102,7 +2099,8 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BucketsGetCall) Do() (*Bucket, error) { +func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -2188,17 +2186,16 @@ func (c *BucketsGetCall) Do() (*Bucket, error) { // method id "storage.buckets.insert": type BucketsInsertCall struct { - s *Service - projectid string - bucket *Bucket - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context } // Insert: Creates a new bucket. 
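// The ifNoneMatch_ field and googleapi.IsNotModified handling above support
// conditional reads keyed on a previously returned ETag. A minimal sketch,
// under the same assumptions as the sketch above (svc is a *storage.Service,
// the bucket name is a placeholder, and etag is an Etag value saved from an
// earlier Buckets.Get response):
func getBucketIfChangedExample(svc *storage.Service, etag string) (*storage.Bucket, bool, error) {
	b, err := svc.Buckets.Get("example-bucket").IfNoneMatch(etag).Do()
	if googleapi.IsNotModified(err) {
		// The server answered 304 Not Modified: the cached metadata is still current.
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return b, true, nil
}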
func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { - c := &BucketsInsertCall{s: r.s, opt_: make(map[string]interface{})} - c.projectid = projectid + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) c.bucket = bucket return c } @@ -2217,7 +2214,7 @@ func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsert // "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { - c.opt_["predefinedAcl"] = predefinedAcl + c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } @@ -2238,7 +2235,7 @@ func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCa // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { - c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } @@ -2251,21 +2248,21 @@ func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { c.ctx_ = ctx return c @@ -2278,23 +2275,9 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - params.Set("project", fmt.Sprintf("%v", c.projectid)) - if v, ok := c.opt_["predefinedAcl"]; ok { - params.Set("predefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { - params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.SetOpaque(req.URL) req.Header.Set("Content-Type", ctype) @@ -2312,7 +2295,8 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BucketsInsertCall) Do() (*Bucket, error) { +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -2427,23 +2411,23 @@ func (c *BucketsInsertCall) Do() (*Bucket, error) { // method id "storage.buckets.list": type BucketsListCall struct { - s *Service - projectid string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // List: Retrieves a list of buckets for a given project. func (r *BucketsService) List(projectid string) *BucketsListCall { - c := &BucketsListCall{s: r.s, opt_: make(map[string]interface{})} - c.projectid = projectid + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) return c } // MaxResults sets the optional parameter "maxResults": Maximum number // of buckets to return. func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { - c.opt_["maxResults"] = maxResults + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -2451,14 +2435,14 @@ func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { // previously-returned page token representing part of the larger set of // results to view. func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { - c.opt_["pageToken"] = pageToken + c.urlParams_.Set("pageToken", pageToken) return c } // Prefix sets the optional parameter "prefix": Filter results to // buckets whose names begin with this prefix. func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { - c.opt_["prefix"] = prefix + c.urlParams_.Set("prefix", prefix) return c } @@ -2469,15 +2453,15 @@ func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. func (c *BucketsListCall) Projection(projection string) *BucketsListCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2487,13 +2471,13 @@ func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { c.ctx_ = ctx return c @@ -2501,31 +2485,14 @@ func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - params.Set("project", fmt.Sprintf("%v", c.projectid)) - if v, ok := c.opt_["maxResults"]; ok { - params.Set("maxResults", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["pageToken"]; ok { - params.Set("pageToken", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["prefix"]; ok { - params.Set("prefix", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.SetOpaque(req.URL) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -2540,7 +2507,8 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BucketsListCall) Do() (*Buckets, error) { +func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -2628,19 +2596,40 @@ func (c *BucketsListCall) Do() (*Buckets, error) { } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "storage.buckets.patch": type BucketsPatchCall struct { - s *Service - bucket string - bucket2 *Bucket - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context } // Patch: Updates a bucket. This method supports patch semantics. func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { - c := &BucketsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.bucket2 = bucket2 return c @@ -2651,7 +2640,7 @@ func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall // conditional on whether the bucket's current metageneration matches // the given value. 
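// Pages (added above for BucketsListCall) drives pageToken internally and
// invokes f once per page until NextPageToken is empty or f returns an error.
// An illustrative sketch, assuming the usual Items []*Bucket field on the
// generated Buckets type, a golang.org/x/net/context Context (the package
// used by ctxhttp elsewhere in this file), and placeholder project and
// prefix values:
func listAllBucketsExample(ctx context.Context, svc *storage.Service) ([]string, error) {
	var names []string
	err := svc.Buckets.List("example-project").Prefix("logs-").Pages(ctx, func(page *storage.Buckets) error {
		for _, b := range page.Items {
			names = append(names, b.Name)
		}
		return nil // returning a non-nil error here would stop the iteration
	})
	return names, err
}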
func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -2660,7 +2649,7 @@ func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *B // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -2678,7 +2667,7 @@ func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { - c.opt_["predefinedAcl"] = predefinedAcl + c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } @@ -2699,7 +2688,7 @@ func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { - c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } @@ -2710,21 +2699,21 @@ func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { c.ctx_ = ctx return c @@ -2737,28 +2726,9 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["predefinedAcl"]; ok { - params.Set("predefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { - params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -2778,7 +2748,8 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BucketsPatchCall) Do() (*Bucket, error) { +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -2905,16 +2876,16 @@ func (c *BucketsPatchCall) Do() (*Bucket, error) { // method id "storage.buckets.update": type BucketsUpdateCall struct { - s *Service - bucket string - bucket2 *Bucket - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context } // Update: Updates a bucket. func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { - c := &BucketsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.bucket2 = bucket2 return c @@ -2925,7 +2896,7 @@ func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCa // conditional on whether the bucket's current metageneration matches // the given value. func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -2934,7 +2905,7 @@ func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) * // conditional on whether the bucket's current metageneration does not // match the given value. func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -2952,7 +2923,7 @@ func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // "publicReadWrite" - Project team owners get OWNER access, and // allUsers get WRITER access. 
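// The metageneration preconditions above travel as URL parameters, and the
// ForceSendFields mechanism (serialized by gensupport.MarshalJSON earlier in
// this file) lets a patch carry an explicit zero value. A hedged sketch,
// assuming the generated Bucket and BucketVersioning types expose Versioning,
// Enabled, and ForceSendFields like the other structs in this file, and that
// metageneration was read from a prior Buckets.Get:
func disableVersioningIfUnchangedExample(svc *storage.Service, metageneration int64) (*storage.Bucket, error) {
	patch := &storage.Bucket{
		Versioning: &storage.BucketVersioning{
			Enabled:         false,
			ForceSendFields: []string{"Enabled"}, // send "enabled": false rather than omitting it
		},
	}
	return svc.Buckets.Patch("example-bucket", patch).
		IfMetagenerationMatch(metageneration). // request fails if the bucket changed since it was read
		Do()
}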
func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { - c.opt_["predefinedAcl"] = predefinedAcl + c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } @@ -2973,7 +2944,7 @@ func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCa // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { - c.opt_["predefinedDefaultObjectAcl"] = predefinedDefaultObjectAcl + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } @@ -2984,21 +2955,21 @@ func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // "full" - Include all properties. // "noAcl" - Omit acl and defaultObjectAcl properties. func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { c.ctx_ = ctx return c @@ -3011,28 +2982,9 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["predefinedAcl"]; ok { - params.Set("predefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["predefinedDefaultObjectAcl"]; ok { - params.Set("predefinedDefaultObjectAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -3052,7 +3004,8 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BucketsUpdateCall) Do() (*Bucket, error) { +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -3179,30 +3132,30 @@ func (c *BucketsUpdateCall) Do() (*Bucket, error) { // method id "storage.channels.stop": type ChannelsStopCall struct { - s *Service - channel *Channel - opt_ map[string]interface{} - ctx_ context.Context + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context } // Stop: Stop watching resources through this channel func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { - c := &ChannelsStopCall{s: r.s, opt_: make(map[string]interface{})} + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.channel = channel return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { c.ctx_ = ctx return c @@ -3215,13 +3168,9 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.SetOpaque(req.URL) req.Header.Set("Content-Type", ctype) @@ -3233,7 +3182,8 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.channels.stop" call. -func (c *ChannelsStopCall) Do() error { +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err @@ -3266,33 +3216,33 @@ func (c *ChannelsStopCall) Do() error { // method id "storage.defaultObjectAccessControls.delete": type DefaultObjectAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context } // Delete: Permanently deletes the default object ACL entry for the // specified entity on the specified bucket. func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { - c := &DefaultObjectAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity return c } -// Fields allows partial responses to be retrieved. 
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { c.ctx_ = ctx return c @@ -3300,13 +3250,9 @@ func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *De func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -3320,7 +3266,8 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res } // Do executes the "storage.defaultObjectAccessControls.delete" call. -func (c *DefaultObjectAccessControlsDeleteCall) Do() error { +func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err @@ -3354,6 +3301,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do() error { // }, // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -3363,27 +3311,28 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do() error { // method id "storage.defaultObjectAccessControls.get": type DefaultObjectAccessControlsGetCall struct { - s *Service - bucket string - entity string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // Get: Returns the default object ACL entry for the specified entity on // the specified bucket. func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { - c := &DefaultObjectAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} + c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3393,13 +3342,13 @@ func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *Defau // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { c.ctx_ = ctx return c @@ -3407,21 +3356,17 @@ func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *Defau func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -3436,7 +3381,8 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) { +func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -3491,6 +3437,7 @@ func (c *DefaultObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) // "$ref": "ObjectAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -3503,30 +3450,30 @@ type DefaultObjectAccessControlsInsertCall struct { s *Service bucket string objectaccesscontrol *ObjectAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Insert: Creates a new default object ACL entry on the specified // bucket. 
func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { - c := &DefaultObjectAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.objectaccesscontrol = objectaccesscontrol return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { c.ctx_ = ctx return c @@ -3539,13 +3486,9 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -3565,7 +3508,8 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) { +func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -3616,6 +3560,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, erro // "$ref": "ObjectAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -3625,15 +3570,16 @@ func (c *DefaultObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, erro // method id "storage.defaultObjectAccessControls.list": type DefaultObjectAccessControlsListCall struct { - s *Service - bucket string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // List: Retrieves default object ACL entries on the specified bucket. 
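// Once a context is attached with Context, doRequest sends the call through
// ctxhttp.Do, so cancellation or a deadline aborts the pending HTTP request.
// A sketch using the DefaultObjectAccessControls.Insert call defined above,
// assuming "time" and golang.org/x/net/context are imported and the bucket
// and entity values are placeholders:
func insertDefaultACLWithTimeoutExample(svc *storage.Service) (*storage.ObjectAccessControl, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	acl := &storage.ObjectAccessControl{Entity: "allUsers", Role: "READER"}
	return svc.DefaultObjectAccessControls.Insert("example-bucket", acl).Context(ctx).Do()
}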
func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { - c := &DefaultObjectAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } @@ -3642,7 +3588,7 @@ func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectA // "ifMetagenerationMatch": If present, only return default ACL listing // if the bucket's current metageneration matches this value. func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -3651,15 +3597,15 @@ func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenera // listing if the bucket's current metageneration does not match the // given value. func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3669,13 +3615,13 @@ func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *Defa // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { c.ctx_ = ctx return c @@ -3683,26 +3629,16 @@ func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *Defa func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") - urls += "?" + params.Encode() + urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -3717,7 +3653,8 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) { +func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -3777,6 +3714,7 @@ func (c *DefaultObjectAccessControlsListCall) Do() (*ObjectAccessControls, error // "$ref": "ObjectAccessControls" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -3790,31 +3728,31 @@ type DefaultObjectAccessControlsPatchCall struct { bucket string entity string objectaccesscontrol *ObjectAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Patch: Updates a default object ACL entry on the specified bucket. // This method supports patch semantics. func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { - c := &DefaultObjectAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { c.ctx_ = ctx return c @@ -3827,13 +3765,9 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + params.Encode() + urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -3854,7 +3788,8 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) { +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -3912,6 +3847,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error // "$ref": "ObjectAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -3925,30 +3861,30 @@ type DefaultObjectAccessControlsUpdateCall struct { bucket string entity string objectaccesscontrol *ObjectAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Update: Updates a default object ACL entry on the specified bucket. func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { - c := &DefaultObjectAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { c.ctx_ = ctx return c @@ -3961,13 +3897,9 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -3988,7 +3920,8 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res // returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) { +func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -4046,6 +3979,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, erro // "$ref": "ObjectAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -4055,18 +3989,18 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, erro // method id "storage.objectAccessControls.delete": type ObjectAccessControlsDeleteCall struct { - s *Service - bucket string - object string - entity string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context } // Delete: Permanently deletes the ACL entry for the specified entity on // the specified object. func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { - c := &ObjectAccessControlsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.entity = entity @@ -4077,21 +4011,21 @@ func (r *ObjectAccessControlsService) Delete(bucket string, object string, entit // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { c.ctx_ = ctx return c @@ -4099,16 +4033,9 @@ func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAcc func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -4123,7 +4050,8 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, } // Do executes the "storage.objectAccessControls.delete" call. -func (c *ObjectAccessControlsDeleteCall) Do() error { +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err @@ -4170,6 +4098,7 @@ func (c *ObjectAccessControlsDeleteCall) Do() error { // }, // "path": "b/{bucket}/o/{object}/acl/{entity}", // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -4179,18 +4108,19 @@ func (c *ObjectAccessControlsDeleteCall) Do() error { // method id "storage.objectAccessControls.get": type ObjectAccessControlsGetCall struct { - s *Service - bucket string - object string - entity string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // Get: Returns the ACL entry for the specified entity on the specified // object. func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { - c := &ObjectAccessControlsGetCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.entity = entity @@ -4201,15 +4131,15 @@ func (r *ObjectAccessControlsService) Get(bucket string, object string, entity s // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4219,13 +4149,13 @@ func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccess // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { c.ctx_ = ctx return c @@ -4233,16 +4163,9 @@ func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccess func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -4250,8 +4173,8 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err "entity": c.entity, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -4266,7 +4189,8 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) { +func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -4334,6 +4258,7 @@ func (c *ObjectAccessControlsGetCall) Do() (*ObjectAccessControl, error) { // "$ref": "ObjectAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -4347,13 +4272,13 @@ type ObjectAccessControlsInsertCall struct { bucket string object string objectaccesscontrol *ObjectAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Insert: Creates a new ACL entry on the specified object. func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { - c := &ObjectAccessControlsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.objectaccesscontrol = objectaccesscontrol @@ -4364,21 +4289,21 @@ func (r *ObjectAccessControlsService) Insert(bucket string, object string, objec // selects a specific revision of this object (as opposed to the latest // version, the default). 
func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { c.ctx_ = ctx return c @@ -4391,16 +4316,9 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -4421,7 +4339,8 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) { +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -4485,6 +4404,7 @@ func (c *ObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) { // "$ref": "ObjectAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -4494,16 +4414,17 @@ func (c *ObjectAccessControlsInsertCall) Do() (*ObjectAccessControl, error) { // method id "storage.objectAccessControls.list": type ObjectAccessControlsListCall struct { - s *Service - bucket string - object string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // List: Retrieves ACL entries on the specified object. 
func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { - c := &ObjectAccessControlsListCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c @@ -4513,15 +4434,15 @@ func (r *ObjectAccessControlsService) List(bucket string, object string) *Object // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4531,13 +4452,13 @@ func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAcces // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { c.ctx_ = ctx return c @@ -4545,24 +4466,17 @@ func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAcces func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -4577,7 +4491,8 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *ObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) { +func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -4638,6 +4553,7 @@ func (c *ObjectAccessControlsListCall) Do() (*ObjectAccessControls, error) { // "$ref": "ObjectAccessControls" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -4652,14 +4568,14 @@ type ObjectAccessControlsPatchCall struct { object string entity string objectaccesscontrol *ObjectAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Patch: Updates an ACL entry on the specified object. This method // supports patch semantics. func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { - c := &ObjectAccessControlsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.entity = entity @@ -4671,21 +4587,21 @@ func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { c.ctx_ = ctx return c @@ -4698,16 +4614,9 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -4729,7 +4638,8 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e // returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) { +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -4800,6 +4710,7 @@ func (c *ObjectAccessControlsPatchCall) Do() (*ObjectAccessControl, error) { // "$ref": "ObjectAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -4814,13 +4725,13 @@ type ObjectAccessControlsUpdateCall struct { object string entity string objectaccesscontrol *ObjectAccessControl - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Update: Updates an ACL entry on the specified object. func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { - c := &ObjectAccessControlsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.entity = entity @@ -4832,21 +4743,21 @@ func (r *ObjectAccessControlsService) Update(bucket string, object string, entit // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { c.ctx_ = ctx return c @@ -4859,16 +4770,9 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + params.Encode() + urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -4890,7 +4794,8 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) { +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -4961,6 +4866,7 @@ func (c *ObjectAccessControlsUpdateCall) Do() (*ObjectAccessControl, error) { // "$ref": "ObjectAccessControl" // }, // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -4974,14 +4880,14 @@ type ObjectsComposeCall struct { destinationBucket string destinationObject string composerequest *ComposeRequest - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Compose: Concatenates a list of existing objects into a new object in // the same bucket. func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { - c := &ObjectsComposeCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.destinationBucket = destinationBucket c.destinationObject = destinationObject c.composerequest = composerequest @@ -5005,7 +4911,7 @@ func (r *ObjectsService) Compose(destinationBucket string, destinationObject str // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { - c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } @@ -5013,7 +4919,7 @@ func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl s // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { - c.opt_["ifGenerationMatch"] = ifGenerationMatch + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } @@ -5021,21 +4927,21 @@ func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *Objects // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do and Download methods. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { c.ctx_ = ctx return c @@ -5048,22 +4954,9 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["destinationPredefinedAcl"]; ok { - params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationMatch"]; ok { - params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "destinationBucket": c.destinationBucket, @@ -5080,7 +4973,8 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { // Download fetches the API endpoint's "media" value, instead of the normal // API response value. If the returned error is nil, the Response is guaranteed to // have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsComposeCall) Download() (*http.Response, error) { +func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("media") if err != nil { return nil, err @@ -5099,7 +4993,8 @@ func (c *ObjectsComposeCall) Download() (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsComposeCall) Do() (*Object, error) { +func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -5194,7 +5089,8 @@ func (c *ObjectsComposeCall) Do() (*Object, error) { // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], - // "supportsMediaDownload": true + // "supportsMediaDownload": true, + // "useMediaDownloadService": true // } } @@ -5208,14 +5104,14 @@ type ObjectsCopyCall struct { destinationBucket string destinationObject string object *Object - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Copy: Copies a source object to a destination object. Optionally // overrides metadata. 
func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { - c := &ObjectsCopyCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket c.sourceObject = sourceObject c.destinationBucket = destinationBucket @@ -5241,7 +5137,7 @@ func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinat // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { - c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } @@ -5249,7 +5145,7 @@ func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl stri // Makes the operation conditional on whether the destination object's // current generation matches the given value. func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { - c.opt_["ifGenerationMatch"] = ifGenerationMatch + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } @@ -5258,7 +5154,7 @@ func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCop // the destination object's current generation does not match the given // value. func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { - c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } @@ -5267,7 +5163,7 @@ func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *Obje // the destination object's current metageneration matches the given // value. func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -5276,7 +5172,7 @@ func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *Ob // whether the destination object's current metageneration does not // match the given value. func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -5284,7 +5180,7 @@ func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int6 // "ifSourceGenerationMatch": Makes the operation conditional on whether // the source object's generation matches the given value. func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { - c.opt_["ifSourceGenerationMatch"] = ifSourceGenerationMatch + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) return c } @@ -5293,7 +5189,7 @@ func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) // whether the source object's generation does not match the given // value. 
func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { - c.opt_["ifSourceGenerationNotMatch"] = ifSourceGenerationNotMatch + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) return c } @@ -5302,7 +5198,7 @@ func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch // whether the source object's current metageneration matches the given // value. func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { - c.opt_["ifSourceMetagenerationMatch"] = ifSourceMetagenerationMatch + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) return c } @@ -5311,7 +5207,7 @@ func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatc // whether the source object's current metageneration does not match the // given value. func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { - c.opt_["ifSourceMetagenerationNotMatch"] = ifSourceMetagenerationNotMatch + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) return c } @@ -5323,7 +5219,7 @@ func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationN // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } @@ -5331,21 +5227,21 @@ func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { // present, selects a specific revision of the source object (as opposed // to the latest version, the default). func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { - c.opt_["sourceGeneration"] = sourceGeneration + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do and Download methods. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. 
func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { c.ctx_ = ctx return c @@ -5358,46 +5254,9 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["destinationPredefinedAcl"]; ok { - params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationMatch"]; ok { - params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationNotMatch"]; ok { - params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifSourceGenerationMatch"]; ok { - params.Set("ifSourceGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifSourceGenerationNotMatch"]; ok { - params.Set("ifSourceGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifSourceMetagenerationMatch"]; ok { - params.Set("ifSourceMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifSourceMetagenerationNotMatch"]; ok { - params.Set("ifSourceMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["sourceGeneration"]; ok { - params.Set("sourceGeneration", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, @@ -5416,7 +5275,8 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { // Download fetches the API endpoint's "media" value, instead of the normal // API response value. If the returned error is nil, the Response is guaranteed to // have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsCopyCall) Download() (*http.Response, error) { +func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("media") if err != nil { return nil, err @@ -5435,7 +5295,8 @@ func (c *ObjectsCopyCall) Download() (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsCopyCall) Do() (*Object, error) { +func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -5599,7 +5460,8 @@ func (c *ObjectsCopyCall) Do() (*Object, error) { // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], - // "supportsMediaDownload": true + // "supportsMediaDownload": true, + // "useMediaDownloadService": true // } } @@ -5607,18 +5469,18 @@ func (c *ObjectsCopyCall) Do() (*Object, error) { // method id "storage.objects.delete": type ObjectsDeleteCall struct { - s *Service - bucket string - object string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context } // Delete: Deletes an object and its metadata. Deletions are permanent // if versioning is not enabled for the bucket, or if the generation // parameter is used. func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { - c := &ObjectsDeleteCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c @@ -5628,7 +5490,7 @@ func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall // permanently deletes a specific revision of this object (as opposed to // the latest version, the default). func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } @@ -5636,7 +5498,7 @@ func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { - c.opt_["ifGenerationMatch"] = ifGenerationMatch + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } @@ -5644,7 +5506,7 @@ func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsD // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { - c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } @@ -5652,7 +5514,7 @@ func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *Ob // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -5661,21 +5523,21 @@ func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) * // whether the object's current metageneration does not match the given // value. func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// Fields allows partial responses to be retrieved. 
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { c.ctx_ = ctx return c @@ -5683,28 +5545,9 @@ func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationMatch"]; ok { - params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationNotMatch"]; ok { - params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -5718,7 +5561,8 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "storage.objects.delete" call. -func (c *ObjectsDeleteCall) Do() error { +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if err != nil { return err @@ -5793,16 +5637,17 @@ func (c *ObjectsDeleteCall) Do() error { // method id "storage.objects.get": type ObjectsGetCall struct { - s *Service - bucket string - object string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // Get: Retrieves an object or its metadata. func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { - c := &ObjectsGetCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c @@ -5812,7 +5657,7 @@ func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { // selects a specific revision of this object (as opposed to the latest // version, the default). 
func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } @@ -5820,7 +5665,7 @@ func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { // Makes the operation conditional on whether the object's generation // matches the given value. func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { - c.opt_["ifGenerationMatch"] = ifGenerationMatch + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } @@ -5828,7 +5673,7 @@ func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetC // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's generation does not match the given value. func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { - c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } @@ -5836,7 +5681,7 @@ func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *Objec // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -5845,7 +5690,7 @@ func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *Obj // whether the object's current metageneration does not match the given // value. func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -5856,15 +5701,15 @@ func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5874,13 +5719,13 @@ func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do and Download methods. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. 
func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { c.ctx_ = ctx return c @@ -5888,39 +5733,17 @@ func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationMatch"]; ok { - params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationNotMatch"]; ok { - params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -5931,7 +5754,8 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { // Download fetches the API endpoint's "media" value, instead of the normal // API response value. If the returned error is nil, the Response is guaranteed to // have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsGetCall) Download() (*http.Response, error) { +func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("media") if err != nil { return nil, err @@ -5950,7 +5774,8 @@ func (c *ObjectsGetCall) Download() (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsGetCall) Do() (*Object, error) { +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -6054,7 +5879,8 @@ func (c *ObjectsGetCall) Do() (*Object, error) { // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ], - // "supportsMediaDownload": true + // "supportsMediaDownload": true, + // "useMediaDownloadService": true // } } @@ -6062,20 +5888,21 @@ func (c *ObjectsGetCall) Do() (*Object, error) { // method id "storage.objects.insert": type ObjectsInsertCall struct { - s *Service - bucket string - object *Object - opt_ map[string]interface{} - media_ io.Reader - resumable_ googleapi.SizeReaderAt - mediaType_ string - protocol_ string - ctx_ context.Context + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + media_ io.Reader + resumableBuffer_ *gensupport.ResumableBuffer + mediaType_ string + mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater_ googleapi.ProgressUpdater + ctx_ context.Context } // Insert: Stores a new object and metadata. func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { - c := &ObjectsInsertCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object return c @@ -6088,7 +5915,7 @@ func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCal // an object with uploadType=media to indicate the encoding of the // content being uploaded. func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { - c.opt_["contentEncoding"] = contentEncoding + c.urlParams_.Set("contentEncoding", contentEncoding) return c } @@ -6096,7 +5923,7 @@ func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInse // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { - c.opt_["ifGenerationMatch"] = ifGenerationMatch + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } @@ -6104,7 +5931,7 @@ func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsI // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { - c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } @@ -6112,7 +5939,7 @@ func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *Ob // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -6121,7 +5948,7 @@ func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) * // whether the object's current metageneration does not match the given // value. 
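Do and Download now accept variadic googleapi.CallOption values, applied via gensupport.SetOptions before the request URL is built, and the discovery document gains useMediaDownloadService. A minimal sketch of the media path under the same assumptions as above; as the Download comment notes, the caller must close the response body:

package storageexample

import (
	"io"

	storage "google.golang.org/api/storage/v1"
)

// downloadObject streams the object's media value (alt=media) into dst.
func downloadObject(svc *storage.Service, bucket, object string, dst io.Writer) error {
	res, err := svc.Objects.Get(bucket, object).Download()
	if err != nil {
		return err
	}
	// Per the Download contract above, err == nil guarantees a 2xx response
	// and the caller must close the body.
	defer res.Body.Close()
	_, err = io.Copy(dst, res.Body)
	return err
}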
func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -6130,7 +5957,7 @@ func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // object metadata's name value, if any. For information about how to // URL encode object names to be path safe, see Encoding URI Path Parts. func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { - c.opt_["name"] = name + c.urlParams_.Set("name", name) return c } @@ -6150,7 +5977,7 @@ func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { - c.opt_["predefinedAcl"] = predefinedAcl + c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } @@ -6162,53 +5989,69 @@ func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCa // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Media specifies the media to upload in a single chunk. +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize.The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. // At most one of Media and ResumableMedia may be set. -func (c *ObjectsInsertCall) Media(r io.Reader) *ObjectsInsertCall { - c.media_ = r - c.protocol_ = "multipart" +func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + opts := googleapi.ProcessMediaOptions(options) + chunkSize := opts.ChunkSize + if !opts.ForceEmptyContentType { + r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) + } + c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) return c } -// ResumableMedia specifies the media to upload in chunks and can be canceled with ctx. -// At most one of Media and ResumableMedia may be set. -// mediaType identifies the MIME media type of the upload, such as "image/png". -// If mediaType is "", it will be auto-detected. -// The provided ctx will supersede any context previously provided to -// the Context method. +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. 
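Media now takes googleapi.MediaOption values: googleapi.ChunkSize picks between a single multipart request (size 0) and a resumable upload, and googleapi.ContentType bypasses content sniffing. A hedged upload sketch; the Name field on storage.Object is assumed from the public API rather than from these hunks:

package storageexample

import (
	"os"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// uploadFile stores the file at path as an object called name in bucket.
func uploadFile(svc *storage.Service, bucket, name, path string) (*storage.Object, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Name is an assumed storage.Object field; see the note above.
	return svc.Objects.Insert(bucket, &storage.Object{Name: name}).
		Media(f,
			googleapi.ContentType("application/octet-stream"), // skip content sniffing
			googleapi.ChunkSize(8*1024*1024)).                  // resumable upload in 8 MiB chunks
		Do()
}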
func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { c.ctx_ = ctx - c.resumable_ = io.NewSectionReader(r, 0, size) - c.mediaType_ = mediaType - c.protocol_ = "resumable" + rdr := gensupport.ReaderAtToReader(r, size) + rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) + c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) + c.media_ = nil + c.mediaSize_ = size return c } -// ProgressUpdater provides a callback function that will be called after every chunk. -// It should be a low-latency function in order to not slow down the upload operation. -// This should only be called when using ResumableMedia (as opposed to Media). +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { - c.opt_["progressUpdater"] = pu + c.progressUpdater_ = pu return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. -// This context will supersede any context previously provided to -// the ResumableMedia method. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. 
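ResumableMedia remains (now marked deprecated in favour of Media) and ProgressUpdater stores its callback in a typed field that Do invokes with (bytesSent, totalSize) after each chunk. A sketch of a cancellable upload with progress reporting, using golang.org/x/net/context to match the context package this vendored code builds against:

package storageexample

import (
	"fmt"
	"os"

	"golang.org/x/net/context"
	storage "google.golang.org/api/storage/v1"
)

// uploadWithProgress runs a cancellable resumable upload and reports progress
// after every chunk via the ProgressUpdater callback.
func uploadWithProgress(ctx context.Context, svc *storage.Service, bucket, name, path string) (*storage.Object, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}

	return svc.Objects.Insert(bucket, &storage.Object{Name: name}). // Name is an assumed field
		ResumableMedia(ctx, f, fi.Size(), "application/octet-stream").
		ProgressUpdater(func(current, total int64) {
			fmt.Printf("uploaded %d of %d bytes\n", current, total)
		}).
		Do()
}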
func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { c.ctx_ = ctx return c @@ -6221,61 +6064,31 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["contentEncoding"]; ok { - params.Set("contentEncoding", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationMatch"]; ok { - params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationNotMatch"]; ok { - params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["name"]; ok { - params.Set("name", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["predefinedAcl"]; ok { - params.Set("predefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - if c.media_ != nil || c.resumable_ != nil { + if c.media_ != nil || c.resumableBuffer_ != nil { urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) - params.Set("uploadType", c.protocol_) - } - urls += "?" + params.Encode() - if c.protocol_ != "resumable" { - var cancel func() - cancel, _ = googleapi.ConditionallyIncludeMedia(c.media_, &body, &ctype) - if cancel != nil { - defer cancel() + protocol := "multipart" + if c.resumableBuffer_ != nil { + protocol = "resumable" } + c.urlParams_.Set("uploadType", protocol) + } + urls += "?" + c.urlParams_.Encode() + if c.media_ != nil { + var combined io.ReadCloser + combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) + defer combined.Close() + body = combined } req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - if c.protocol_ == "resumable" { - if c.mediaType_ == "" { - c.mediaType_ = googleapi.DetectMediaType(c.resumable_) - } + if c.resumableBuffer_ != nil && c.mediaType_ != "" { req.Header.Set("X-Upload-Content-Type", c.mediaType_) - req.Header.Set("Content-Type", "application/json; charset=utf-8") - } else { - req.Header.Set("Content-Type", ctype) } + req.Header.Set("Content-Type", ctype) req.Header.Set("User-Agent", c.s.userAgent()) if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -6290,8 +6103,11 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsInsertCall) Do() (*Object, error) { - res, err := c.doRequest("json") +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := gensupport.Retry(c.ctx_, func() (*http.Response, error) { + return c.doRequest("json") + }, gensupport.DefaultBackoffStrategy()) if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() @@ -6308,28 +6124,32 @@ func (c *ObjectsInsertCall) Do() (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - var progressUpdater_ googleapi.ProgressUpdater - if v, ok := c.opt_["progressUpdater"]; ok { - if pu, ok := v.(googleapi.ProgressUpdater); ok { - progressUpdater_ = pu - } - } - if c.protocol_ == "resumable" { + if c.resumableBuffer_ != nil { loc := res.Header.Get("Location") - rx := &googleapi.ResumableUpload{ - Client: c.s.client, - UserAgent: c.s.userAgent(), - URI: loc, - Media: c.resumable_, - MediaType: c.mediaType_, - ContentLength: c.resumable_.Size(), - Callback: progressUpdater_, + rx := &gensupport.ResumableUpload{ + Client: c.s.client, + UserAgent: c.s.userAgent(), + URI: loc, + Media: c.resumableBuffer_, + MediaType: c.mediaType_, + Callback: func(curr int64) { + if c.progressUpdater_ != nil { + c.progressUpdater_(curr, c.mediaSize_) + } + }, + } + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() } - res, err = rx.Upload(c.ctx_) + res, err = rx.Upload(ctx) if err != nil { return nil, err } defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } } ret := &Object{ ServerResponse: googleapi.ServerResponse{ @@ -6452,7 +6272,8 @@ func (c *ObjectsInsertCall) Do() (*Object, error) { // "https://www.googleapis.com/auth/devstorage.read_write" // ], // "supportsMediaDownload": true, - // "supportsMediaUpload": true + // "supportsMediaUpload": true, + // "useMediaDownloadService": true // } } @@ -6460,15 +6281,16 @@ func (c *ObjectsInsertCall) Do() (*Object, error) { // method id "storage.objects.list": type ObjectsListCall struct { - s *Service - bucket string - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context } // List: Retrieves a list of objects matching the criteria. func (r *ObjectsService) List(bucket string) *ObjectsListCall { - c := &ObjectsListCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } @@ -6480,7 +6302,7 @@ func (r *ObjectsService) List(bucket string) *ObjectsListCall { // truncated after the delimiter, returned in prefixes. Duplicate // prefixes are omitted. func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { - c.opt_["delimiter"] = delimiter + c.urlParams_.Set("delimiter", delimiter) return c } @@ -6489,7 +6311,7 @@ func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { // fewer total results may be returned than requested. The default value // of this parameter is 1,000 items. func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { - c.opt_["maxResults"] = maxResults + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -6497,14 +6319,14 @@ func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { // previously-returned page token representing part of the larger set of // results to view. 
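The list builders (Delimiter, MaxResults, PageToken, and so on) now write directly into urlParams_. A sketch of driving pagination by hand with PageToken and each response's NextPageToken; the Items and Name fields are assumptions about the generated types, which are not shown in these hunks:

package storageexample

import (
	storage "google.golang.org/api/storage/v1"
)

// listAll collects every object name in bucket, driving pagination by hand
// with PageToken and each response's NextPageToken.
func listAll(svc *storage.Service, bucket string) ([]string, error) {
	var names []string
	pageToken := ""
	for {
		call := svc.Objects.List(bucket).MaxResults(500)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		objs, err := call.Do()
		if err != nil {
			return nil, err
		}
		for _, o := range objs.Items { // Items is an assumed field; see the note above
			names = append(names, o.Name) // Name is an assumed field
		}
		if objs.NextPageToken == "" {
			return names, nil
		}
		pageToken = objs.NextPageToken
	}
}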
func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { - c.opt_["pageToken"] = pageToken + c.urlParams_.Set("pageToken", pageToken) return c } // Prefix sets the optional parameter "prefix": Filter results to // objects whose names begin with this prefix. func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { - c.opt_["prefix"] = prefix + c.urlParams_.Set("prefix", prefix) return c } @@ -6515,7 +6337,7 @@ func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } @@ -6523,15 +6345,15 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { // versions of an object as distinct results. The default is false. For // more information, see Object Versioning. func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { - c.opt_["versions"] = versions + c.urlParams_.Set("versions", fmt.Sprint(versions)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6541,13 +6363,13 @@ func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { - c.opt_["ifNoneMatch"] = entityTag + c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { c.ctx_ = ctx return c @@ -6555,38 +6377,16 @@ func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["delimiter"]; ok { - params.Set("delimiter", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["maxResults"]; ok { - params.Set("maxResults", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["pageToken"]; ok { - params.Set("pageToken", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["prefix"]; ok { - params.Set("prefix", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["versions"]; ok { - params.Set("versions", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - urls += "?" + params.Encode() + urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) req.Header.Set("User-Agent", c.s.userAgent()) - if v, ok := c.opt_["ifNoneMatch"]; ok { - req.Header.Set("If-None-Match", fmt.Sprintf("%v", v)) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) } if c.ctx_ != nil { return ctxhttp.Do(c.ctx_, c.s.client, req) @@ -6601,7 +6401,8 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsListCall) Do() (*Objects, error) { +func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -6700,21 +6501,42 @@ func (c *ObjectsListCall) Do() (*Objects, error) { } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "storage.objects.patch": type ObjectsPatchCall struct { - s *Service - bucket string - object string - object2 *Object - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context } // Patch: Updates an object's metadata. This method supports patch // semantics. func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { - c := &ObjectsPatchCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.object2 = object2 @@ -6725,7 +6547,7 @@ func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *O // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } @@ -6733,7 +6555,7 @@ func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { - c.opt_["ifGenerationMatch"] = ifGenerationMatch + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } @@ -6741,7 +6563,7 @@ func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPa // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. 
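The new Pages helper above wraps that manual loop: it calls Do repeatedly, hands each page to the supplied function, follows NextPageToken until it is empty, and then restores the original page token. Roughly equivalent caller code, under the same assumptions as the previous sketch:

package storageexample

import (
	"golang.org/x/net/context"
	storage "google.golang.org/api/storage/v1"
)

// countObjects visits every page of results for a prefix using the new Pages
// helper, which follows NextPageToken until it is empty.
func countObjects(ctx context.Context, svc *storage.Service, bucket, prefix string) (int, error) {
	n := 0
	err := svc.Objects.List(bucket).Prefix(prefix).Pages(ctx, func(page *storage.Objects) error {
		n += len(page.Items) // Items is an assumed field, as above
		return nil           // returning a non-nil error would stop the iteration
	})
	return n, err
}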
func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { - c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } @@ -6749,7 +6571,7 @@ func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *Obj // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -6758,7 +6580,7 @@ func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *O // whether the object's current metageneration does not match the given // value. func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -6778,7 +6600,7 @@ func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { - c.opt_["predefinedAcl"] = predefinedAcl + c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } @@ -6789,21 +6611,21 @@ func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
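Patch sends only the metadata fields set on the supplied Object and can be made conditional on the current metageneration. A small sketch; ContentType is an assumed storage.Object field:

package storageexample

import (
	storage "google.golang.org/api/storage/v1"
)

// setContentType patches a single metadata field, and fails if another writer
// has changed the object's metadata since metageneration was read.
func setContentType(svc *storage.Service, bucket, object string, metageneration int64, ctype string) (*storage.Object, error) {
	patch := &storage.Object{ContentType: ctype} // ContentType is an assumed field; see the note above
	return svc.Objects.Patch(bucket, object, patch).
		IfMetagenerationMatch(metageneration).
		Do()
}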
func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { c.ctx_ = ctx return c @@ -6816,34 +6638,9 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationMatch"]; ok { - params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationNotMatch"]; ok { - params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["predefinedAcl"]; ok { - params.Set("predefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -6864,7 +6661,8 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsPatchCall) Do() (*Object, error) { +func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -7003,14 +6801,14 @@ type ObjectsRewriteCall struct { destinationBucket string destinationObject string object *Object - opt_ map[string]interface{} + urlParams_ gensupport.URLParams ctx_ context.Context } // Rewrite: Rewrites a source object to a destination object. Optionally // overrides metadata. func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { - c := &ObjectsRewriteCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.sourceBucket = sourceBucket c.sourceObject = sourceObject c.destinationBucket = destinationBucket @@ -7036,7 +6834,7 @@ func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, desti // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { - c.opt_["destinationPredefinedAcl"] = destinationPredefinedAcl + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) return c } @@ -7044,7 +6842,7 @@ func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl s // Makes the operation conditional on whether the destination object's // current generation matches the given value. 
func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { - c.opt_["ifGenerationMatch"] = ifGenerationMatch + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } @@ -7053,7 +6851,7 @@ func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *Objects // the destination object's current generation does not match the given // value. func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { - c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } @@ -7062,7 +6860,7 @@ func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *O // the destination object's current metageneration matches the given // value. func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -7071,7 +6869,7 @@ func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) // whether the destination object's current metageneration does not // match the given value. func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -7079,7 +6877,7 @@ func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch i // "ifSourceGenerationMatch": Makes the operation conditional on whether // the source object's generation matches the given value. func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { - c.opt_["ifSourceGenerationMatch"] = ifSourceGenerationMatch + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) return c } @@ -7088,7 +6886,7 @@ func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int // whether the source object's generation does not match the given // value. func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { - c.opt_["ifSourceGenerationNotMatch"] = ifSourceGenerationNotMatch + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) return c } @@ -7097,7 +6895,7 @@ func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMat // whether the source object's current metageneration matches the given // value. func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { - c.opt_["ifSourceMetagenerationMatch"] = ifSourceMetagenerationMatch + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) return c } @@ -7106,7 +6904,7 @@ func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationM // whether the source object's current metageneration does not match the // given value. 
func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.opt_["ifSourceMetagenerationNotMatch"] = ifSourceMetagenerationNotMatch + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) return c } @@ -7120,7 +6918,7 @@ func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerati // change across rewrite calls else you'll get an error that the // rewriteToken is invalid. func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { - c.opt_["maxBytesRewrittenPerCall"] = maxBytesRewrittenPerCall + c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) return c } @@ -7132,7 +6930,7 @@ func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall i // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } @@ -7143,7 +6941,7 @@ func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { // but if included those fields must match the values provided in the // first rewrite request. func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { - c.opt_["rewriteToken"] = rewriteToken + c.urlParams_.Set("rewriteToken", rewriteToken) return c } @@ -7151,21 +6949,21 @@ func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCa // present, selects a specific revision of the source object (as opposed // to the latest version, the default). func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { - c.opt_["sourceGeneration"] = sourceGeneration + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
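As the RewriteToken comment above explains, a rewrite of a large object may span several calls, each returning a token that must be passed back unchanged until the copy completes. A sketch of that loop; the Done and RewriteToken fields on storage.RewriteResponse are assumptions based on those comments, not shown in this diff:

package storageexample

import (
	storage "google.golang.org/api/storage/v1"
)

// rewriteObject copies src to dst, re-issuing the call with the returned
// rewrite token until the service reports the copy as finished.
func rewriteObject(svc *storage.Service, srcBucket, srcObject, dstBucket, dstObject string) (*storage.RewriteResponse, error) {
	call := svc.Objects.Rewrite(srcBucket, srcObject, dstBucket, dstObject, &storage.Object{})
	token := ""
	for {
		if token != "" {
			call = call.RewriteToken(token)
		}
		res, err := call.Do()
		if err != nil {
			return nil, err
		}
		if res.Done { // Done and RewriteToken are assumed response fields; see the note above
			return res, nil
		}
		token = res.RewriteToken
	}
}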
func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { c.ctx_ = ctx return c @@ -7178,52 +6976,9 @@ func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["destinationPredefinedAcl"]; ok { - params.Set("destinationPredefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationMatch"]; ok { - params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationNotMatch"]; ok { - params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifSourceGenerationMatch"]; ok { - params.Set("ifSourceGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifSourceGenerationNotMatch"]; ok { - params.Set("ifSourceGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifSourceMetagenerationMatch"]; ok { - params.Set("ifSourceMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifSourceMetagenerationNotMatch"]; ok { - params.Set("ifSourceMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["maxBytesRewrittenPerCall"]; ok { - params.Set("maxBytesRewrittenPerCall", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["rewriteToken"]; ok { - params.Set("rewriteToken", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["sourceGeneration"]; ok { - params.Set("sourceGeneration", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, @@ -7246,7 +7001,8 @@ func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectsRewriteCall) Do() (*RewriteResponse, error) { +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -7428,17 +7184,17 @@ func (c *ObjectsRewriteCall) Do() (*RewriteResponse, error) { // method id "storage.objects.update": type ObjectsUpdateCall struct { - s *Service - bucket string - object string - object2 *Object - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context } // Update: Updates an object's metadata. 
func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { - c := &ObjectsUpdateCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object c.object2 = object2 @@ -7449,7 +7205,7 @@ func (r *ObjectsService) Update(bucket string, object string, object2 *Object) * // selects a specific revision of this object (as opposed to the latest // version, the default). func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { - c.opt_["generation"] = generation + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } @@ -7457,7 +7213,7 @@ func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { // Makes the operation conditional on whether the object's current // generation matches the given value. func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { - c.opt_["ifGenerationMatch"] = ifGenerationMatch + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } @@ -7465,7 +7221,7 @@ func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsU // "ifGenerationNotMatch": Makes the operation conditional on whether // the object's current generation does not match the given value. func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { - c.opt_["ifGenerationNotMatch"] = ifGenerationNotMatch + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } @@ -7473,7 +7229,7 @@ func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *Ob // "ifMetagenerationMatch": Makes the operation conditional on whether // the object's current metageneration matches the given value. func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { - c.opt_["ifMetagenerationMatch"] = ifMetagenerationMatch + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } @@ -7482,7 +7238,7 @@ func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) * // whether the object's current metageneration does not match the given // value. func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { - c.opt_["ifMetagenerationNotMatch"] = ifMetagenerationNotMatch + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } @@ -7502,7 +7258,7 @@ func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in // "publicRead" - Object owner gets OWNER access, and allUsers get // READER access. func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { - c.opt_["predefinedAcl"] = predefinedAcl + c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } @@ -7513,21 +7269,21 @@ func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCa // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do and Download methods. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { c.ctx_ = ctx return c @@ -7540,34 +7296,9 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["generation"]; ok { - params.Set("generation", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationMatch"]; ok { - params.Set("ifGenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifGenerationNotMatch"]; ok { - params.Set("ifGenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationMatch"]; ok { - params.Set("ifMetagenerationMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["ifMetagenerationNotMatch"]; ok { - params.Set("ifMetagenerationNotMatch", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["predefinedAcl"]; ok { - params.Set("predefinedAcl", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -7584,7 +7315,8 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { // Download fetches the API endpoint's "media" value, instead of the normal // API response value. If the returned error is nil, the Response is guaranteed to // have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsUpdateCall) Download() (*http.Response, error) { +func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("media") if err != nil { return nil, err @@ -7603,7 +7335,8 @@ func (c *ObjectsUpdateCall) Download() (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsUpdateCall) Do() (*Object, error) { +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -7729,7 +7462,8 @@ func (c *ObjectsUpdateCall) Do() (*Object, error) { // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" // ], - // "supportsMediaDownload": true + // "supportsMediaDownload": true, + // "useMediaDownloadService": true // } } @@ -7737,16 +7471,16 @@ func (c *ObjectsUpdateCall) Do() (*Object, error) { // method id "storage.objects.watchAll": type ObjectsWatchAllCall struct { - s *Service - bucket string - channel *Channel - opt_ map[string]interface{} - ctx_ context.Context + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context } // WatchAll: Watch for changes on all objects in a bucket. func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { - c := &ObjectsWatchAllCall{s: r.s, opt_: make(map[string]interface{})} + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.channel = channel return c @@ -7759,7 +7493,7 @@ func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatch // truncated after the delimiter, returned in prefixes. Duplicate // prefixes are omitted. func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { - c.opt_["delimiter"] = delimiter + c.urlParams_.Set("delimiter", delimiter) return c } @@ -7768,7 +7502,7 @@ func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { // fewer total results may be returned than requested. The default value // of this parameter is 1,000 items. func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { - c.opt_["maxResults"] = maxResults + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -7776,14 +7510,14 @@ func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall // previously-returned page token representing part of the larger set of // results to view. func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { - c.opt_["pageToken"] = pageToken + c.urlParams_.Set("pageToken", pageToken) return c } // Prefix sets the optional parameter "prefix": Filter results to // objects whose names begin with this prefix. func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { - c.opt_["prefix"] = prefix + c.urlParams_.Set("prefix", prefix) return c } @@ -7794,7 +7528,7 @@ func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { // "full" - Include all properties. // "noAcl" - Omit the acl property. func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { - c.opt_["projection"] = projection + c.urlParams_.Set("projection", projection) return c } @@ -7802,21 +7536,21 @@ func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall // versions of an object as distinct results. The default is false. For // more information, see Object Versioning. func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { - c.opt_["versions"] = versions + c.urlParams_.Set("versions", fmt.Sprint(versions)) return c } -// Fields allows partial responses to be retrieved. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
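WatchAll registers a notification channel for object changes with the same filtering options as List. A heavily hedged sketch — the Channel fields used here (Id, Type, Address) are assumptions about the generated Channel type, which does not appear in these hunks:

package storageexample

import (
	storage "google.golang.org/api/storage/v1"
)

// watchBucket asks the service to deliver change notifications for bucket to
// the given webhook URL.
func watchBucket(svc *storage.Service, bucket, channelID, webhookURL string) (*storage.Channel, error) {
	ch := &storage.Channel{
		Id:      channelID,  // assumed field
		Type:    "web_hook", // assumed field and value
		Address: webhookURL, // assumed field
	}
	return svc.Objects.WatchAll(bucket, ch).Do()
}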
func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { - c.opt_["fields"] = googleapi.CombineFields(s) + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// Context sets the context to be used in this call's Do method. -// Any pending HTTP request will be aborted if the provided context -// is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { c.ctx_ = ctx return c @@ -7829,31 +7563,9 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { return nil, err } ctype := "application/json" - params := make(url.Values) - params.Set("alt", alt) - if v, ok := c.opt_["delimiter"]; ok { - params.Set("delimiter", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["maxResults"]; ok { - params.Set("maxResults", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["pageToken"]; ok { - params.Set("pageToken", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["prefix"]; ok { - params.Set("prefix", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["projection"]; ok { - params.Set("projection", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["versions"]; ok { - params.Set("versions", fmt.Sprintf("%v", v)) - } - if v, ok := c.opt_["fields"]; ok { - params.Set("fields", fmt.Sprintf("%v", v)) - } + c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") - urls += "?" + params.Encode() + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, @@ -7873,7 +7585,8 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsWatchAllCall) Do() (*Channel, error) { +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go index 96d36baf2cb7..a634b055245a 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go @@ -14,7 +14,7 @@ // Package cloud contains Google Cloud Platform APIs related types // and common functions. -package cloud // import "google.golang.org/cloud" +package cloud import ( "net/http" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go new file mode 100644 index 000000000000..3dd684e08809 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go @@ -0,0 +1,327 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "google.golang.org/cloud/internal" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 750 * time.Millisecond, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 750 * time.Millisecond, + }, + }, +} + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag. This func is otherwise equivalent to Get. +func getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv("GCE_METADATA_HOST") + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. 
+ host = "169.254.169.254" + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := metaClient.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var onGCE struct { + sync.Mutex + set bool + v bool +} + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + defer onGCE.Unlock() + onGCE.Lock() + if onGCE.set { + return onGCE.v + } + onGCE.set = true + + // We use the DNS name of the metadata service here instead of the IP address + // because we expect that to fail faster in the not-on-GCE case. + res, err := metaClient.Get("http://metadata.google.internal") + if err != nil { + return false + } + onGCE.v = res.Header.Get("Metadata-Flavor") == "Google" + return onGCE.v +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + suffix += "?wait_for_change=true&last_etag=" + for { + val, etag, err := getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return projID.get() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return projNum.get() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". 
+func Hostname() (string, error) { + return getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { + var s []string + j, err := Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { + return instID.get() +} + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { + host, err := Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { + zone, err := getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. 
+func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go index 8aa70ff429c4..bf22c6aea5de 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go @@ -15,7 +15,7 @@ // Package storage contains a Google Cloud Storage client. // // This package is experimental and may make backwards-incompatible changes. -package storage // import "google.golang.org/cloud/storage" +package storage import ( "crypto" diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md index b6decb6d8c81..407d384a7c8b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,8 +20,4 @@ When filing an issue, make sure to answer these five questions: 5. What did you see instead? ### Contributing code -Please read the Contribution Guidelines before sending patches. - -We will not accept GitHub pull requests once Gerrit is setup (we will use Gerrit instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile index 0dc225ff41ec..12e84e4e5be2 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile @@ -45,3 +45,6 @@ testrace: testdeps clean: go clean google.golang.org/grpc/... + +coverage: testdeps + ./coverage.sh --coveralls diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md index caa40261f9fb..37b05f0953d5 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md @@ -2,22 +2,31 @@ [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) -The Go implementation of [gRPC](https://github.com/grpc/grpc) +The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. Installation ------------ -To install this package, you need to install Go 1.4 and setup your Go workspace on your computer. 
The simplest way to install the library is to run: +To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` $ go get google.golang.org/grpc ``` +Prerequisites +------------- + +This requires Go 1.4 or above. + +Constraints +----------- +The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. + Documentation ------------- -You can find more detailed documentation and examples in the [grpc-common repository](http://github.com/grpc/grpc-common). +See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). Status ------ -Alpha - ready for early adopters. +Beta release diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go index 3b974721d05e..504a6e18a851 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go @@ -34,13 +34,13 @@ package grpc import ( + "bytes" "io" "time" "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) @@ -48,16 +48,16 @@ import ( // On error, it returns the error and indicates whether the call should be retried. // // TODO(zhaoq): Check whether the received message sequence is valid. -func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { +func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { // Try to acquire header metadata from the server if there is any. var err error c.headerMD, err = stream.Header() if err != nil { return err } - p := &parser{s: stream} + p := &parser{r: stream} for { - if err = recv(p, codec, reply); err != nil { + if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil { if err == io.EOF { break } @@ -69,7 +69,7 @@ func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream } // sendRequest writes out various information of an RPC such as Context and Message. -func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { +func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { stream, err := t.NewStream(ctx, callHdr) if err != nil { return nil, err @@ -81,8 +81,11 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t } } }() - // TODO(zhaoq): Support compression. 
- outBuf, err := encode(codec, args, compressionNone) + var cbuf *bytes.Buffer + if compressor != nil { + cbuf = new(bytes.Buffer) + } + outBuf, err := encode(codec, args, compressor, cbuf) if err != nil { return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) } @@ -94,14 +97,6 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t return stream, nil } -// callInfo contains all related configuration and information about an RPC. -type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - traceInfo traceInfo // in trace.go -} - // Invoke is called by the generated code. It sends the RPC request on the // wire and returns after response is received. func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { @@ -116,9 +111,8 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli o.after(&c) } }() - if EnableTracing { - c.traceInfo.tr = trace.New("Sent."+methodFamily(method), method) + c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) defer c.traceInfo.tr.Finish() c.traceInfo.firstLine.client = true if deadline, ok := ctx.Deadline(); ok { @@ -133,17 +127,11 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } }() } - - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - } topts := &transport.Options{ Last: true, Delay: false, } var ( - ts int // track the transport sequence number lastErr error // record the error that happened ) for { @@ -156,7 +144,14 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if lastErr != nil && c.failFast { return toRPCErr(lastErr) } - t, ts, err = cc.wait(ctx, ts) + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + t, err = cc.dopts.picker.Pick(ctx) if err != nil { if lastErr != nil { // This was a retry; return the error from the last attempt. 
@@ -164,10 +159,10 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } return toRPCErr(err) } - if EnableTracing { - c.traceInfo.tr.LazyLog(payload{args}, true) + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) } - stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts) + stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) if err != nil { if _, ok := err.(transport.ConnectionError); ok { lastErr = err @@ -179,12 +174,12 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli return toRPCErr(err) } // Receive the response - lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply) + lastErr = recvResponse(cc.dopts, t, &c, stream, reply) if _, ok := lastErr.(transport.ConnectionError); ok { continue } - if EnableTracing { - c.traceInfo.tr.LazyLog(payload{reply}, true) + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } t.CloseStream(stream, lastErr) if lastErr != nil { diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go index e7bb9453b728..e2264236f6e9 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go @@ -35,12 +35,14 @@ package grpc import ( "errors" + "fmt" "net" "strings" "sync" "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/transport" @@ -49,20 +51,34 @@ import ( var ( // ErrUnspecTarget indicates that the target address is unspecified. ErrUnspecTarget = errors.New("grpc: target is unspecified") + // ErrNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // ErrCredentialsMisuse indicates that users want to transmit security information + // (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") // ErrClientConnClosing indicates that the operation is illegal because // the session is closing. ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the connection could not be // established or re-established within the specified timeout. ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second ) // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { - codec Codec - block bool - copts transport.ConnectOptions + codec Codec + cp Compressor + dc Decompressor + picker Picker + block bool + insecure bool + copts transport.ConnectOptions } // DialOption configures how we set up the connection. 
@@ -75,6 +91,29 @@ func WithCodec(c Codec) DialOption { } } +// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message +// compressor. +func WithCompressor(cp Compressor) DialOption { + return func(o *dialOptions) { + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating +// message decompressor. +func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithPicker returns a DialOption which sets a picker for connection selection. +func WithPicker(p Picker) DialOption { + return func(o *dialOptions) { + o.picker = p + } +} + // WithBlock returns a DialOption which makes caller of Dial blocks until the underlying // connection is up. Without this, Dial returns immediately and connecting the server // happens in background. @@ -84,6 +123,14 @@ func WithBlock() DialOption { } } +// WithInsecure returns a DialOption which disables transport security for this ClientConn. +// Note that transport security is required unless WithInsecure is set. +func WithInsecure() DialOption { + return func(o *dialOptions) { + o.insecure = true + } +} + // WithTransportCredentials returns a DialOption which configures a // connection level security credentials (e.g., TLS/SSL). func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { @@ -114,84 +161,247 @@ func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) Di } } +// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. +func WithUserAgent(s string) DialOption { + return func(o *dialOptions) { + o.copts.UserAgent = s + } +} + // Dial creates a client connection the given target. -// TODO(zhaoq): Have an option to make Dial return immediately without waiting -// for connection to complete. func Dial(target string, opts ...DialOption) (*ClientConn, error) { - if target == "" { - return nil, ErrUnspecTarget - } cc := &ClientConn{ - target: target, - shutdownChan: make(chan struct{}), + target: target, } for _, opt := range opts { opt(&cc.dopts) } + if cc.dopts.codec == nil { + // Set the default codec. + cc.dopts.codec = protoCodec{} + } + if cc.dopts.picker == nil { + cc.dopts.picker = &unicastPicker{ + target: target, + } + } + if err := cc.dopts.picker.Init(cc); err != nil { + return nil, err + } colonPos := strings.LastIndex(target, ":") if colonPos == -1 { colonPos = len(target) } cc.authority = target[:colonPos] - if cc.dopts.codec == nil { - // Set the default codec. - cc.dopts.codec = protoCodec{} + return cc, nil +} + +// ConnectivityState indicates the state of a client connection. +type ConnectivityState int + +const ( + // Idle indicates the ClientConn is idle. + Idle ConnectivityState = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. 
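The hunk above introduces several new DialOptions (WithCompressor, WithDecompressor, WithPicker, WithInsecure, WithUserAgent) and makes transport security an explicit choice. A minimal dialing sketch against a hypothetical plaintext server at localhost:50051, using the gzip compressor and decompressor constructors added elsewhere in this diff; this is an illustration of the option set, not a complete client:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// WithInsecure is now required for plaintext connections; omitting both it
	// and transport credentials makes connection setup fail with
	// ErrNoTransportSecurity.
	conn, err := grpc.Dial("localhost:50051", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithUserAgent("example-client/0.1"),
		grpc.WithCompressor(grpc.NewGZIPCompressor()),
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
	)
	if err != nil {
		log.Fatalf("Dial: %v", err)
	}
	defer conn.Close()

	// State is the new, experimental connectivity API on ClientConn.
	state, err := conn.State()
	if err != nil {
		log.Fatalf("State: %v", err)
	}
	log.Printf("connection state: %v", state)
}
```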
+ Shutdown +) + +func (s ConnectivityState) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + panic(fmt.Sprintf("unknown connectivity state: %d", s)) } - if cc.dopts.block { - if err := cc.resetTransport(false); err != nil { +} + +// ClientConn represents a client connection to an RPC service. +type ClientConn struct { + target string + authority string + dopts dialOptions +} + +// State returns the connectivity state of cc. +// This is EXPERIMENTAL API. +func (cc *ClientConn) State() (ConnectivityState, error) { + return cc.dopts.picker.State() +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState. +// It returns the new state or error. +// This is EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + return cc.dopts.picker.WaitForStateChange(ctx, sourceState) +} + +// Close starts to tear down the ClientConn. +func (cc *ClientConn) Close() error { + return cc.dopts.picker.Close() +} + +// Conn is a client connection to a single destination. +type Conn struct { + target string + dopts dialOptions + resetChan chan int + shutdownChan chan struct{} + events trace.EventLog + + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport +} + +// NewConn creates a Conn. +func NewConn(cc *ClientConn) (*Conn, error) { + if cc.target == "" { + return nil, ErrUnspecTarget + } + c := &Conn{ + target: cc.target, + dopts: cc.dopts, + resetChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + } + if EnableTracing { + c.events = trace.NewEventLog("grpc.ClientConn", c.target) + } + if !c.dopts.insecure { + var ok bool + for _, cd := range c.dopts.copts.AuthOptions { + if _, ok := cd.(credentials.TransportAuthenticator); !ok { + continue + } + ok = true + } + if !ok { + return nil, ErrNoTransportSecurity + } + } else { + for _, cd := range c.dopts.copts.AuthOptions { + if cd.RequireTransportSecurity() { + return nil, ErrCredentialsMisuse + } + } + } + c.stateCV = sync.NewCond(&c.mu) + if c.dopts.block { + if err := c.resetTransport(false); err != nil { + c.Close() return nil, err } // Start to monitor the error status of transport. - go cc.transportMonitor() + go c.transportMonitor() } else { // Start a goroutine connecting to the server asynchronously. go func() { - if err := cc.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", target, err) + if err := c.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err) + c.Close() return } - go cc.transportMonitor() + c.transportMonitor() }() } - return cc, nil + return c, nil } -// ClientConn represents a client connection to an RPC service. -type ClientConn struct { - target string - authority string - dopts dialOptions - shutdownChan chan struct{} +// printf records an event in cc's event log, unless cc has been closed. +// REQUIRES cc.mu is held. +func (cc *Conn) printf(format string, a ...interface{}) { + if cc.events != nil { + cc.events.Printf(format, a...) + } +} - mu sync.Mutex - // ready is closed and becomes nil when a new transport is up or failed - // due to timeout. 
- ready chan struct{} - // Indicates the ClientConn is under destruction. - closing bool - // Every time a new transport is created, this is incremented by 1. Used - // to avoid trying to recreate a transport while the new one is already - // under construction. - transportSeq int - transport transport.ClientTransport +// errorf records an error in cc's event log, unless cc has been closed. +// REQUIRES cc.mu is held. +func (cc *Conn) errorf(format string, a ...interface{}) { + if cc.events != nil { + cc.events.Errorf(format, a...) + } +} + +// State returns the connectivity state of the Conn +func (cc *Conn) State() ConnectivityState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.state +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState. +func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + cc.mu.Lock() + defer cc.mu.Unlock() + if sourceState != cc.state { + return cc.state, nil + } + done := make(chan struct{}) + var err error + go func() { + select { + case <-ctx.Done(): + cc.mu.Lock() + err = ctx.Err() + cc.stateCV.Broadcast() + cc.mu.Unlock() + case <-done: + } + }() + defer close(done) + for sourceState == cc.state { + cc.stateCV.Wait() + if err != nil { + return cc.state, err + } + } + return cc.state, nil } -func (cc *ClientConn) resetTransport(closeTransport bool) error { +// NotifyReset tries to signal the underlying transport needs to be reset due to +// for example a name resolution change in flight. +func (cc *Conn) NotifyReset() { + select { + case cc.resetChan <- 0: + default: + } +} + +func (cc *Conn) resetTransport(closeTransport bool) error { var retries int start := time.Now() for { cc.mu.Lock() - t := cc.transport - ts := cc.transportSeq - // Avoid wait() picking up a dying transport unnecessarily. - cc.transportSeq = 0 - if cc.closing { + cc.printf("connecting") + if cc.state == Shutdown { + // cc.Close() has been invoked. cc.mu.Unlock() return ErrClientConnClosing } + cc.state = Connecting + cc.stateCV.Broadcast() cc.mu.Unlock() if closeTransport { - t.Close() + cc.transport.Close() } // Adjust timeout for the current try. copts := cc.dopts.copts @@ -206,29 +416,64 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error { return ErrClientConnTimeout } } - newTransport, err := transport.NewClientTransport(cc.target, &copts) + sleepTime := backoff(retries) + timeout := sleepTime + if timeout < minConnectTimeout { + timeout = minConnectTimeout + } + if copts.Timeout == 0 || copts.Timeout > timeout { + copts.Timeout = timeout + } + connectTime := time.Now() + addr, err := cc.dopts.picker.PickAddr() + var newTransport transport.ClientTransport + if err == nil { + newTransport, err = transport.NewClientTransport(addr, &copts) + } if err != nil { - sleepTime := backoff(retries) + cc.mu.Lock() + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + return ErrClientConnClosing + } + cc.errorf("transient failure: %v", err) + cc.state = TransientFailure + cc.stateCV.Broadcast() + if cc.ready != nil { + close(cc.ready) + cc.ready = nil + } + cc.mu.Unlock() + sleepTime -= time.Since(connectTime) + if sleepTime < 0 { + sleepTime = 0 + } // Fail early before falling into sleep. 
if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { + cc.mu.Lock() + cc.errorf("connection timeout") + cc.mu.Unlock() cc.Close() return ErrClientConnTimeout } closeTransport = false time.Sleep(sleepTime) retries++ - grpclog.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) + grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) continue } cc.mu.Lock() - if cc.closing { + cc.printf("ready") + if cc.state == Shutdown { // cc.Close() has been invoked. cc.mu.Unlock() newTransport.Close() return ErrClientConnClosing } + cc.state = Ready + cc.stateCV.Broadcast() cc.transport = newTransport - cc.transportSeq = ts + 1 if cc.ready != nil { close(cc.ready) cc.ready = nil @@ -238,40 +483,65 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error { } } +func (cc *Conn) reconnect() bool { + cc.mu.Lock() + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + return false + } + cc.state = TransientFailure + cc.stateCV.Broadcast() + cc.mu.Unlock() + if err := cc.resetTransport(true); err != nil { + // The ClientConn is closing. + cc.mu.Lock() + cc.printf("transport exiting: %v", err) + cc.mu.Unlock() + grpclog.Printf("grpc: Conn.transportMonitor exits due to: %v", err) + return false + } + return true +} + // Run in a goroutine to track the error in transport and create the // new transport if an error happens. It returns when the channel is closing. -func (cc *ClientConn) transportMonitor() { +func (cc *Conn) transportMonitor() { for { select { - // shutdownChan is needed to detect the channel teardown when + // shutdownChan is needed to detect the teardown when // the ClientConn is idle (i.e., no RPC in flight). case <-cc.shutdownChan: return + case <-cc.resetChan: + if !cc.reconnect() { + return + } case <-cc.transport.Error(): - if err := cc.resetTransport(true); err != nil { - // The channel is closing. - grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err) + if !cc.reconnect() { return } - continue + // Tries to drain reset signal if there is any since it is out-dated. + select { + case <-cc.resetChan: + default: + } } } } -// When wait returns, either the new transport is up or ClientConn is -// closing. Used to avoid working on a dying transport. It updates and -// returns the transport and its version when there is no error. -func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) { +// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed. +func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { for { cc.mu.Lock() switch { - case cc.closing: + case cc.state == Shutdown: cc.mu.Unlock() - return nil, 0, ErrClientConnClosing - case ts < cc.transportSeq: - // Worked on a dying transport. Try the new one immediately. - defer cc.mu.Unlock() - return cc.transport, cc.transportSeq, nil + return nil, ErrClientConnClosing + case cc.state == Ready: + ct := cc.transport + cc.mu.Unlock() + return ct, nil default: ready := cc.ready if ready == nil { @@ -281,7 +551,7 @@ func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTranspo cc.mu.Unlock() select { case <-ctx.Done(): - return nil, 0, transport.ContextErr(ctx.Err()) + return nil, transport.ContextErr(ctx.Err()) // Wait until the new transport is ready or failed. 
case <-ready: } @@ -289,18 +559,23 @@ func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTranspo } } -// Close starts to tear down the ClientConn. Returns ErrClientConnClosing if +// Close starts to tear down the Conn. Returns ErrClientConnClosing if // it has been closed (mostly due to dial time-out). // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many ClientConn's in a // tight loop. -func (cc *ClientConn) Close() error { +func (cc *Conn) Close() error { cc.mu.Lock() defer cc.mu.Unlock() - if cc.closing { + if cc.state == Shutdown { return ErrClientConnClosing } - cc.closing = true + cc.state = Shutdown + cc.stateCV.Broadcast() + if cc.events != nil { + cc.events.Finish() + cc.events = nil + } if cc.ready != nil { close(cc.ready) cc.ready = nil diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go index e14b464acfd4..37c5b860bd6d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go @@ -33,7 +33,7 @@ // Package codes defines the canonical error codes used by gRPC. It is // consistent across various languages. -package codes // import "google.golang.org/grpc/codes" +package codes // A Code is an unsigned 32-bit error code as defined in the gRPC spec. type Code uint32 diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go index 63c42a29cc4b..0b0b89b6aa6e 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go @@ -35,7 +35,7 @@ // which encapsulate all the state needed by a client to authenticate with a // server and make various assertions, e.g., about the client's identity, role, // or whether it is authorized to make a particular call. -package credentials // import "google.golang.org/grpc/credentials" +package credentials import ( "crypto/tls" @@ -47,14 +47,11 @@ import ( "time" "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" ) var ( // alpnProtoStr are the specified application level protocols for gRPC. - alpnProtoStr = []string{"h2", "h2-14", "h2-15", "h2-16"} + alpnProtoStr = []string{"h2"} ) // Credentials defines the common interface all supported credentials must @@ -63,11 +60,15 @@ type Credentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. This should be called by the transport layer on // each request, and the data should be populated in headers or other - // context. When supported by the underlying implementation, ctx can - // be used for timeout and cancellation. + // context. uri is the URI of the entry point for the request. When + // supported by the underlying implementation, ctx can be used for + // timeout and cancellation. // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. 
- GetRequestMetadata(ctx context.Context) (map[string]string, error) + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentails requires + // transport security. + RequireTransportSecurity() bool } // ProtocolInfo provides information regarding the gRPC wire protocol version, @@ -81,26 +82,44 @@ type ProtocolInfo struct { SecurityVersion string } +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + // TransportAuthenticator defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportAuthenticator interface { // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. - ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, error) - // ServerHandshake does the authentication handshake for servers. - ServerHandshake(rawConn net.Conn) (net.Conn, error) + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) // Info provides the ProtocolInfo of this TransportAuthenticator. Info() ProtocolInfo Credentials } +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +func (t TLSInfo) AuthType() string { + return "tls" +} + // tlsCreds is the credentials required for authenticating a connection using TLS. type tlsCreds struct { // TLS configuration config tls.Config } -func (c *tlsCreds) Info() ProtocolInfo { +func (c tlsCreds) Info() ProtocolInfo { return ProtocolInfo{ SecurityProtocol: "tls", SecurityVersion: "1.2", @@ -109,17 +128,21 @@ func (c *tlsCreds) Info() ProtocolInfo { // GetRequestMetadata returns nil, nil since TLS credentials does not have // metadata. -func (c *tlsCreds) GetRequestMetadata(ctx context.Context) (map[string]string, error) { +func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { return nil, nil } +func (c *tlsCreds) RequireTransportSecurity() bool { + return true +} + type timeoutError struct{} func (timeoutError) Error() string { return "credentials: Dial timed out" } func (timeoutError) Timeout() bool { return true } func (timeoutError) Temporary() bool { return true } -func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, err error) { +func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) { // borrow some code from tls.DialWithDialer var errChannel chan error if timeout != 0 { @@ -146,18 +169,20 @@ func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.D } if err != nil { rawConn.Close() - return nil, err + return nil, nil, err } - return conn, nil + // TODO(zhaoq): Omit the auth info for client now. It is more for + // information than anything else. 
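The Credentials interface now passes the request URI to GetRequestMetadata and adds a mandatory RequireTransportSecurity method, while the OAuth/JWT helpers move out of this package. A compile-checked sketch of a per-RPC credential against the updated interface; the staticToken type and the token value are assumptions of this example:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/credentials"
)

// staticToken attaches a fixed bearer token to every RPC. It exists only to
// illustrate the updated Credentials interface: GetRequestMetadata now
// receives the request URI(s), and RequireTransportSecurity is required.
type staticToken string

func (t staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + string(t)}, nil
}

// Returning true means using this credential over an insecure connection is
// rejected (ErrCredentialsMisuse in the clientconn.go hunk above).
func (t staticToken) RequireTransportSecurity() bool { return true }

// Compile-time check that staticToken satisfies the interface.
var _ credentials.Credentials = staticToken("")

func main() {
	md, _ := staticToken("example-token").GetRequestMetadata(context.Background(), "/pkg.Service/Method")
	fmt.Println(md["authorization"])
}
```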
+ return conn, nil, nil } -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, error) { +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { conn := tls.Server(rawConn, &c.config) if err := conn.Handshake(); err != nil { rawConn.Close() - return nil, err + return nil, nil, err } - return conn, nil + return conn, TLSInfo{conn.ConnectionState()}, nil } // NewTLS uses c to construct a TransportAuthenticator based on TLS. @@ -199,72 +224,3 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, err } return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } - -// TokenSource supplies credentials from an oauth2.TokenSource. -type TokenSource struct { - oauth2.TokenSource -} - -// GetRequestMetadata gets the request metadata as a map from a TokenSource. -func (ts TokenSource) GetRequestMetadata(ctx context.Context) (map[string]string, error) { - token, err := ts.Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, - }, nil -} - -// NewComputeEngine constructs the credentials that fetches access tokens from -// Google Compute Engine (GCE)'s metadata server. It is only valid to use this -// if your program is running on a GCE instance. -// TODO(dsymonds): Deprecate and remove this. -func NewComputeEngine() Credentials { - return TokenSource{google.ComputeTokenSource("")} -} - -// serviceAccount represents credentials via JWT signing key. -type serviceAccount struct { - config *jwt.Config -} - -func (s serviceAccount) GetRequestMetadata(ctx context.Context) (map[string]string, error) { - token, err := s.config.TokenSource(ctx).Token() - if err != nil { - return nil, err - } - return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, - }, nil -} - -// NewServiceAccountFromKey constructs the credentials using the JSON key slice -// from a Google Developers service account. -func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (Credentials, error) { - config, err := google.JWTConfigFromJSON(jsonKey, scope...) - if err != nil { - return nil, err - } - return serviceAccount{config: config}, nil -} - -// NewServiceAccountFromFile constructs the credentials using the JSON key file -// of a Google Developers service account. -func NewServiceAccountFromFile(keyFile string, scope ...string) (Credentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) - } - return NewServiceAccountFromKey(jsonKey, scope...) -} - -// NewApplicationDefault returns "Application Default Credentials". For more -// detail, see https://developers.google.com/accounts/docs/application-default-credentials. -func NewApplicationDefault(ctx context.Context, scope ...string) (Credentials, error) { - t, err := google.DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return TokenSource{t}, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go index c63847745da8..b4c0e740e9c7 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go @@ -1,6 +1,6 @@ /* Package grpc implements an RPC system called gRPC. 
-See https://github.com/grpc/grpc for more information about gRPC. +See www.grpc.io for more information about gRPC. */ -package grpc // import "google.golang.org/grpc" +package grpc diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go index ec089f70f813..2cc09be48945 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go @@ -34,7 +34,7 @@ /* Package grpclog defines logging for grpc. */ -package grpclog // import "google.golang.org/grpc/grpclog" +package grpclog import ( "log" @@ -42,6 +42,8 @@ import ( ) // Use golang's standard logger by default. +// Access is not mutex-protected: do not modify except in init() +// functions. var logger Logger = log.New(os.Stderr, "", log.LstdFlags) // Logger mimics golang's standard Logger as an interface. @@ -54,7 +56,8 @@ type Logger interface { Println(args ...interface{}) } -// SetLogger sets the logger that is used in grpc. +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. func SetLogger(l Logger) { logger = l } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000000..5489143a85c1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,49 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. 
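The grpclog change above documents that the package-level logger is not mutex-protected and may only be replaced from an init function. A minimal sketch of compliant usage with the standard library logger, which already satisfies the grpclog.Logger interface:

```go
package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Per the updated contract, SetLogger must only be called from init().
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}

func main() {
	grpclog.Println("logger installed")
}
```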
+package internal + +// TestingCloseConns closes all existing transports but keeps +// grpcServer.lis accepting new connections. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingCloseConns func(grpcServer interface{}) + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go index 8b4ed9e1a8a0..58469ddd3fa4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go @@ -32,7 +32,7 @@ */ // Package metadata define the structure of the metadata supported by gRPC library. -package metadata // import "google.golang.org/grpc/metadata" +package metadata import ( "encoding/base64" @@ -46,27 +46,16 @@ const ( binHdrSuffix = "-bin" ) -// grpc-http2 requires ASCII header key and value (more detail can be found in -// "Requests" subsection in go/grpc-http2). -func isASCII(s string) bool { - for _, c := range s { - if c > 127 { - return false - } - } - return true -} - // encodeKeyValue encodes key and value qualified for transmission via gRPC. // Transmitting binary headers violates HTTP/2 spec. // TODO(zhaoq): Maybe check if k is ASCII also. func encodeKeyValue(k, v string) (string, string) { - if isASCII(v) { - return k, v + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + val := base64.StdEncoding.EncodeToString([]byte(v)) + v = string(val) } - key := k + binHdrSuffix - val := base64.StdEncoding.EncodeToString([]byte(v)) - return key, string(val) + return k, v } // DecodeKeyValue returns the original key and value corresponding to the @@ -75,24 +64,23 @@ func DecodeKeyValue(k, v string) (string, string, error) { if !strings.HasSuffix(k, binHdrSuffix) { return k, v, nil } - key := k[:len(k)-len(binHdrSuffix)] val, err := base64.StdEncoding.DecodeString(v) if err != nil { return "", "", err } - return key, string(val), nil + return k, string(val), nil } // MD is a mapping from metadata keys to values. Users should use the following // two convenience functions New and Pairs to generate MD. -type MD map[string]string +type MD map[string][]string // New creates a MD from given key-value map. 
func New(m map[string]string) MD { md := MD{} for k, v := range m { key, val := encodeKeyValue(k, v) - md[key] = val + md[key] = append(md[key], val) } return md } @@ -111,7 +99,7 @@ func Pairs(kv ...string) MD { continue } key, val := encodeKeyValue(k, s) - md[key] = val + md[key] = append(md[key], val) } return md } @@ -125,7 +113,9 @@ func (md MD) Len() int { func (md MD) Copy() MD { out := MD{} for k, v := range md { - out[k] = v + for _, i := range v { + out[k] = append(out[k], i) + } } return out } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 000000000000..06605607c371 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// The interface is EXPERIMENTAL and may be suject to change. +package naming + +// Operation defines the corresponding operations for a name resolution change. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an exisiting address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid having both +// empty string Addr and nil Metadata in an Update. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. 
+type Resolver interface { + // Resolve creates a Watcher for target. + Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 000000000000..bfa6205ba9ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. 
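The new naming package defines only the Resolver/Watcher contract and leaves concrete implementations to callers. A toy sketch of a resolver that reports one fixed address on the first Next call and then blocks until closed; the names staticResolver and staticWatcher and the address are assumptions of this example. Such a resolver can be handed to the NewUnicastNamingPicker constructor added later in this diff via the WithPicker dial option:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/naming"
)

// staticResolver resolves every target to a single fixed address.
type staticResolver struct{ addr string }

func (r staticResolver) Resolve(target string) (naming.Watcher, error) {
	return &staticWatcher{addr: r.addr, stop: make(chan struct{})}, nil
}

// staticWatcher delivers the full address set on the first Next call and then
// blocks, as the Watcher contract above requires.
type staticWatcher struct {
	addr string
	sent bool
	stop chan struct{}
}

func (w *staticWatcher) Next() ([]*naming.Update, error) {
	if !w.sent {
		w.sent = true
		return []*naming.Update{{Op: naming.Add, Addr: w.addr}}, nil
	}
	<-w.stop // no further updates; block until Close
	return nil, errors.New("watcher closed")
}

func (w *staticWatcher) Close() { close(w.stop) }

func main() {
	w, _ := staticResolver{addr: "10.0.0.1:443"}.Resolve("ignored")
	updates, _ := w.Next()
	for _, u := range updates {
		fmt.Println(u.Op, u.Addr)
	}
	w.Close()
}
```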
+func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go new file mode 100644 index 000000000000..50f315b44f37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go @@ -0,0 +1,243 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "container/list" + "fmt" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/transport" +) + +// Picker picks a Conn for RPC requests. +// This is EXPERIMENTAL and please do not implement your own Picker for now. +type Picker interface { + // Init does initial processing for the Picker, e.g., initiate some connections. + Init(cc *ClientConn) error + // Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC + // or some error happens. + Pick(ctx context.Context) (transport.ClientTransport, error) + // PickAddr picks a peer address for connecting. This will be called repeated for + // connecting/reconnecting. + PickAddr() (string, error) + // State returns the connectivity state of the underlying connections. + State() (ConnectivityState, error) + // WaitForStateChange blocks until the state changes to something other than + // the sourceState. It returns the new state or error. + WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) + // Close closes all the Conn's owned by this Picker. + Close() error +} + +// unicastPicker is the default Picker which is used when there is no custom Picker +// specified by users. It always picks the same Conn. 
+type unicastPicker struct { + target string + conn *Conn +} + +func (p *unicastPicker) Init(cc *ClientConn) error { + c, err := NewConn(cc) + if err != nil { + return err + } + p.conn = c + return nil +} + +func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { + return p.conn.Wait(ctx) +} + +func (p *unicastPicker) PickAddr() (string, error) { + return p.target, nil +} + +func (p *unicastPicker) State() (ConnectivityState, error) { + return p.conn.State(), nil +} + +func (p *unicastPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + return p.conn.WaitForStateChange(ctx, sourceState) +} + +func (p *unicastPicker) Close() error { + if p.conn != nil { + return p.conn.Close() + } + return nil +} + +// unicastNamingPicker picks an address from a name resolver to set up the connection. +type unicastNamingPicker struct { + cc *ClientConn + resolver naming.Resolver + watcher naming.Watcher + mu sync.Mutex + // The list of the addresses are obtained from watcher. + addrs *list.List + // It tracks the current picked addr by PickAddr(). The next PickAddr may + // push it forward on addrs. + pickedAddr *list.Element + conn *Conn +} + +// NewUnicastNamingPicker creates a Picker to pick addresses from a name resolver +// to connect. +func NewUnicastNamingPicker(r naming.Resolver) Picker { + return &unicastNamingPicker{ + resolver: r, + addrs: list.New(), + } +} + +type addrInfo struct { + addr string + // Set to true if this addrInfo needs to be deleted in the next PickAddrr() call. + deleting bool +} + +// processUpdates calls Watcher.Next() once and processes the obtained updates. +func (p *unicastNamingPicker) processUpdates() error { + updates, err := p.watcher.Next() + if err != nil { + return err + } + for _, update := range updates { + switch update.Op { + case naming.Add: + p.mu.Lock() + p.addrs.PushBack(&addrInfo{ + addr: update.Addr, + }) + p.mu.Unlock() + // Initial connection setup + if p.conn == nil { + conn, err := NewConn(p.cc) + if err != nil { + return err + } + p.conn = conn + } + case naming.Delete: + p.mu.Lock() + for e := p.addrs.Front(); e != nil; e = e.Next() { + if update.Addr == e.Value.(*addrInfo).addr { + if e == p.pickedAddr { + // Do not remove the element now if it is the current picked + // one. We leave the deletion to the next PickAddr() call. + e.Value.(*addrInfo).deleting = true + // Notify Conn to close it. All the live RPCs on this connection + // will be aborted. + p.conn.NotifyReset() + } else { + p.addrs.Remove(e) + } + } + } + p.mu.Unlock() + default: + grpclog.Println("Unknown update.Op ", update.Op) + } + } + return nil +} + +// monitor runs in a standalone goroutine to keep watching name resolution updates until the watcher +// is closed. +func (p *unicastNamingPicker) monitor() { + for { + if err := p.processUpdates(); err != nil { + return + } + } +} + +func (p *unicastNamingPicker) Init(cc *ClientConn) error { + w, err := p.resolver.Resolve(cc.target) + if err != nil { + return err + } + p.watcher = w + p.cc = cc + // Get the initial name resolution. 
+ if err := p.processUpdates(); err != nil { + return err + } + go p.monitor() + return nil +} + +func (p *unicastNamingPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { + return p.conn.Wait(ctx) +} + +func (p *unicastNamingPicker) PickAddr() (string, error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.pickedAddr == nil { + p.pickedAddr = p.addrs.Front() + } else { + pa := p.pickedAddr + p.pickedAddr = pa.Next() + if pa.Value.(*addrInfo).deleting { + p.addrs.Remove(pa) + } + if p.pickedAddr == nil { + p.pickedAddr = p.addrs.Front() + } + } + if p.pickedAddr == nil { + return "", fmt.Errorf("there is no address available to pick") + } + return p.pickedAddr.Value.(*addrInfo).addr, nil +} + +func (p *unicastNamingPicker) State() (ConnectivityState, error) { + return 0, fmt.Errorf("State() is not supported for unicastNamingPicker") +} + +func (p *unicastNamingPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + return 0, fmt.Errorf("WaitForStateChange is not supported for unicastNamingPciker") +} + +func (p *unicastNamingPicker) Close() error { + p.watcher.Close() + p.conn.Close() + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go index 49512603bbc0..96c790bed16b 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go @@ -35,9 +35,12 @@ package grpc import ( "bytes" + "compress/gzip" "encoding/binary" "fmt" "io" + "io/ioutil" + "math" "math/rand" "os" "time" @@ -75,6 +78,71 @@ func (protoCodec) String() string { return "proto" } +// Compressor defines the interface gRPC uses to compress a message. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{} +} + +type gzipCompressor struct { +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := gzip.NewWriter(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + z, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + defer z.Close() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + traceInfo traceInfo // in trace.go +} + // CallOption configures a Call before it starts or extracts information from // a Call after it completes. 
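
The gzip Compressor and Decompressor added above are symmetric: the compressor's Do must Close the gzip writer so the footer is flushed, and the decompressor's Do reads the whole stream back. A standalone round trip, using only the standard library, that mirrors those two Do methods; the payload string is made up.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	msg := []byte("hello gRPC") // illustrative payload

	// Compress: mirrors gzipCompressor.Do (Write, then Close to flush the gzip footer).
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(msg); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress: mirrors gzipDecompressor.Do (NewReader + ReadAll).
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("round trip: %q\n", out)
}
```
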
type CallOption interface { @@ -118,51 +186,62 @@ type payloadFormat uint8 const ( compressionNone payloadFormat = iota // no compression - compressionFlate - // More formats + compressionMade ) // parser reads complelete gRPC messages from the underlying reader. type parser struct { - s io.Reader -} - -// msgFixedHeader defines the header of a gRPC message (go/grpc-wirefmt). -type msgFixedHeader struct { - T payloadFormat - Length uint32 + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail + // at http://www.grpc.io/docs/guides/wire.html. + header [5]byte } -// recvMsg is to read a complete gRPC message from the stream. It is blocking if -// the message has not been complete yet. It returns the message and its type, -// EOF is returned with nil msg and 0 pf if the entire stream is done. Other -// non-nil error is returned if something is wrong on reading. +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { - var hdr msgFixedHeader - if err := binary.Read(p.s, binary.BigEndian, &hdr); err != nil { + if _, err := io.ReadFull(p.r, p.header[:]); err != nil { return 0, nil, err } - if hdr.Length == 0 { - return hdr.T, nil, nil + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil } - msg = make([]byte, int(hdr.Length)) - if _, err := io.ReadFull(p.s, msg); err != nil { + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := io.ReadFull(p.r, msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return hdr.T, msg, nil + return pf, msg, nil } // encode serializes msg and prepends the message header. If msg is nil, it // generates the message header of 0 message length. -func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { - var buf bytes.Buffer - // Write message fixed header. - buf.WriteByte(uint8(pf)) +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { var b []byte - var length uint32 + var length uint if msg != nil { var err error // TODO(zhaoq): optimize to reduce memory alloc and copying. 
@@ -170,27 +249,71 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) { if err != nil { return nil, err } - length = uint32(len(b)) + if cp != nil { + if err := cp.Do(cbuf, b); err != nil { + return nil, err + } + b = cbuf.Bytes() + } + length = uint(len(b)) + } + if length > math.MaxUint32 { + return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) } - var szHdr [4]byte - binary.BigEndian.PutUint32(szHdr[:], length) - buf.Write(szHdr[:]) - buf.Write(b) - return buf.Bytes(), nil + + const ( + payloadLen = 1 + sizeLen = 4 + ) + + var buf = make([]byte, payloadLen+sizeLen+len(b)) + + // Write payload format + if cp == nil { + buf[0] = byte(compressionNone) + } else { + buf[0] = byte(compressionMade) + } + // Write length of b into buf + binary.BigEndian.PutUint32(buf[1:], uint32(length)) + // Copy encoded msg to buf + copy(buf[5:], b) + + return buf, nil +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" { + return transport.StreamErrorf(codes.InvalidArgument, "grpc: invalid grpc-encoding %q with compression enabled", recvCompress) + } + if dc == nil || recvCompress != dc.Type() { + return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf) + } + return nil } -func recv(p *parser, c Codec, m interface{}) error { +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error { pf, d, err := p.recvMsg() if err != nil { return err } - switch pf { - case compressionNone: - if err := c.Unmarshal(d, m); err != nil { - return Errorf(codes.Internal, "grpc: %v", err) + if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { + return err + } + if pf == compressionMade { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } - default: - return Errorf(codes.Internal, "gprc: compression is not supported yet.") + } + if err := c.Unmarshal(d, m); err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } return nil } @@ -217,6 +340,18 @@ func Code(err error) codes.Code { return codes.Unknown } +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +func ErrorDesc(err error) string { + if err == nil { + return "" + } + if e, ok := err.(rpcError); ok { + return e.desc + } + return err.Error() +} + // Errorf returns an error containing an error code and a description; // Errorf returns nil if c is OK. func Errorf(c codes.Code, format string, a ...interface{}) error { @@ -232,6 +367,8 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into a rpcError. 
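
The reworked parser and encode now agree on a fixed 5-byte prefix per message: one payload-format byte (0 for uncompressed, 1 for compressed) followed by a big-endian uint32 length. A self-contained sketch of that framing; frame and unframe are illustrative helpers, not the vendored functions.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// frame prepends the 5-byte gRPC message prefix:
//   byte 0    = payload format (0: uncompressed, 1: compressed)
//   bytes 1-4 = big-endian message length
func frame(compressed bool, msg []byte) []byte {
	buf := make([]byte, 5+len(msg))
	if compressed {
		buf[0] = 1
	}
	binary.BigEndian.PutUint32(buf[1:5], uint32(len(msg)))
	copy(buf[5:], msg)
	return buf
}

// unframe reads one prefixed message back from r.
func unframe(r io.Reader) (compressed bool, msg []byte, err error) {
	var hdr [5]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return false, nil, err
	}
	length := binary.BigEndian.Uint32(hdr[1:])
	msg = make([]byte, length)
	if _, err := io.ReadFull(r, msg); err != nil {
		return false, nil, err
	}
	return hdr[0] == 1, msg, nil
}

func main() {
	wire := frame(false, []byte("ping"))
	c, msg, err := unframe(bytes.NewReader(wire))
	fmt.Println(c, string(msg), err) // false ping <nil>
}
```
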
func toRPCErr(err error) error { switch e := err.(type) { + case rpcError: + return err case transport.StreamError: return rpcError{ code: e.Code, @@ -277,30 +414,39 @@ func convertCode(err error) codes.Code { const ( // how long to wait after the first failure before retrying baseDelay = 1.0 * time.Second - // upper bound on backoff delay - maxDelay = 120 * time.Second - backoffFactor = 2.0 // backoff increases by this factor on each retry - backoffRange = 0.4 // backoff is randomized downwards by this factor + // upper bound of backoff delay + maxDelay = 120 * time.Second + // backoff increases by this factor on each retry + backoffFactor = 1.6 + // backoff is randomized downwards by this factor + backoffJitter = 0.2 ) -// backoff returns a value in [0, maxDelay] that increases exponentially with -// retries, starting from baseDelay. -func backoff(retries int) time.Duration { +func backoff(retries int) (t time.Duration) { + if retries == 0 { + return baseDelay + } backoff, max := float64(baseDelay), float64(maxDelay) for backoff < max && retries > 0 { - backoff = backoff * backoffFactor + backoff *= backoffFactor retries-- } if backoff > max { backoff = max } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. We just subtract up - // to 40% so that we obey maxDelay. - backoff -= backoff * backoffRange * rand.Float64() + // the same time, they won't operate in lockstep. + backoff *= 1 + backoffJitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } return time.Duration(backoff) } + +// SupportPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the grpc package. +// +// This constant may be renamed in the future if a change in the generated code +// requires a synchronised update of grpc-go and protoc-gen-go. This constant +// should not be referenced from any other code. +const SupportPackageIsVersion1 = true diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go index cbf4947b4067..bdf68a0fc9ba 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go @@ -34,23 +34,30 @@ package grpc import ( + "bytes" "errors" "fmt" "io" "net" + "net/http" "reflect" + "runtime" "strings" "sync" + "time" "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) -type methodHandler func(srv interface{}, ctx context.Context, codec Codec, buf []byte) (interface{}, error) +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -78,17 +85,22 @@ type service struct { // Server is a gRPC server to serve RPC requests. 
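
The backoff change swaps the 2.0 growth factor and 40% downward randomization for a 1.6 factor with symmetric ±20% jitter, keeps the 120s cap, and now short-circuits to baseDelay when retries is 0. A sketch that reproduces the new schedule with the jitter term left out so the output is deterministic; the constants are copied from the hunk above.

```go
package main

import (
	"fmt"
	"time"
)

const (
	baseDelay     = 1.0 * time.Second
	maxDelay      = 120 * time.Second
	backoffFactor = 1.6
)

// backoffNoJitter mirrors the new backoff() without the ±20% jitter term.
func backoffNoJitter(retries int) time.Duration {
	if retries == 0 {
		return baseDelay
	}
	backoff, max := float64(baseDelay), float64(maxDelay)
	for backoff < max && retries > 0 {
		backoff *= backoffFactor
		retries--
	}
	if backoff > max {
		backoff = max
	}
	return time.Duration(backoff)
}

func main() {
	for i := 0; i <= 12; i++ {
		fmt.Printf("retry %2d -> %v\n", i, backoffNoJitter(i))
	}
	// Grows 1s, 1.6s, 2.56s, ... and saturates at the 120s cap.
}
```
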
type Server struct { - opts options - mu sync.Mutex - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool - m map[string]*service // service name -> service info + opts options + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[io.Closer]bool + m map[string]*service // service name -> service info + events trace.EventLog } type options struct { creds credentials.Credentials codec Codec + cp Compressor + dc Decompressor maxConcurrentStreams uint32 + useHandlerImpl bool // use http.Handler-based server } // A ServerOption sets options. @@ -101,6 +113,18 @@ func CustomCodec(codec Codec) ServerOption { } } +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { @@ -127,12 +151,33 @@ func NewServer(opt ...ServerOption) *Server { // Set the default codec. opts.codec = protoCodec{} } - return &Server{ + s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[io.Closer]bool), m: make(map[string]*service), } + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } } // RegisterService register a service and its implementation to the gRPC @@ -150,6 +195,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.mu.Lock() defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) if _, ok := s.m[sd.ServiceName]; ok { grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } @@ -175,12 +221,21 @@ var ( ErrServerStopped = errors.New("grpc: the server has been stopped") ) +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + creds, ok := s.opts.creds.(credentials.TransportAuthenticator) + if !ok { + return rawConn, nil, nil + } + return creds.ServerHandshake(rawConn) +} + // Serve accepts incoming connections on the listener lis, creating a new // ServerTransport and service goroutine for each. The service goroutines -// read gRPC request and then call the registered handlers to reply to them. +// read gRPC requests and then call the registered handlers to reply to them. // Service returns when lis.Accept fails. 
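
RPCCompressor and RPCDecompressor slot into the existing functional ServerOption pattern, so enabling gzip on the server is a matter of stacking options at construction time. A hedged call-site sketch, assuming the usual google.golang.org/grpc import path; the listener address is a placeholder and service registration is elided.

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051") // placeholder address
	if err != nil {
		log.Fatal(err)
	}

	// Each option is a plain function over the server's options struct,
	// applied in order by NewServer.
	s := grpc.NewServer(
		grpc.RPCCompressor(grpc.NewGZIPCompressor()),
		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		grpc.MaxConcurrentStreams(100),
	)

	// Generated RegisterXxxServer(s, impl) calls would go here.
	log.Fatal(s.Serve(lis))
}
```
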
func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() + s.printf("serving") if s.lis == nil { s.mu.Unlock() return ErrServerStopped @@ -194,46 +249,167 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() for { - c, err := lis.Accept() + rawConn, err := lis.Accept() if err != nil { + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() return err } - if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok { - c, err = creds.ServerHandshake(c) - if err != nil { - grpclog.Println("grpc: Server.Serve failed to complete security handshake.") - continue - } - } + // Start a new goroutine to deal with rawConn + // so we don't stall this Accept loop goroutine. + go s.handleRawConn(rawConn) + } +} + +// handleRawConn is run in its own goroutine and handles a just-accepted +// connection that has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - c.Close() - return nil - } - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams) - if err != nil { - s.mu.Unlock() - c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - continue - } - s.conns[st] = true + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() + grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + return + } + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + if s.opts.useHandlerImpl { + s.serveUsingHandler(conn) + } else { + s.serveNewHTTP2Transport(conn, authInfo) + } +} + +// serveNewHTTP2Transport sets up a new http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go) and +// serves streams on it. +// This is run in its own goroutine (it does network I/O in +// transport.NewServerTransport). +func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { + st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) + return + } + if !s.addConn(st) { + st.Close() + return + } + s.serveStreams(st) +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer s.removeConn(st) + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) go func() { - st.HandleStreams(func(stream *transport.Stream) { - s.handleStream(st, stream) - }) - s.mu.Lock() - delete(s.conns, st) - s.mu.Unlock() + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) }() + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. 
This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + if !s.addConn(conn) { + conn.Close() + return + } + defer s.removeConn(conn) + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error { - p, err := encode(s.opts.codec, msg, pf) +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + st.Close() + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + trInfo = &traceInfo{ + tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + stream.TraceContext(trInfo.tr) + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + return false + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { + var cbuf *bytes.Buffer + if cp != nil { + cbuf = new(bytes.Buffer) + } + p, err := encode(s.opts.codec, msg, cp, cbuf) if err != nil { // This typically indicates a fatal issue (e.g., memory // corruption or hardware faults) the application program @@ -247,13 +423,27 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return t.Write(stream, p, opts) } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) { - p := &parser{s: stream} +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.firstLine.client = false + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + p := &parser{r: stream} for { pf, req, err := p.recvMsg() if err == io.EOF { // The entire stream is done (for unary RPC only). - return + return err + } + if err == io.ErrUnexpectedEOF { + err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} } if err != nil { switch err := err.(type) { @@ -261,110 +451,226 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // Nothing to do here. 
case transport.StreamError: if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) } - return + return err } - switch pf { - case compressionNone: - statusCode := codes.OK - statusDesc := "" - reply, appErr := md.Handler(srv.server, stream.Context(), s.opts.codec, req) - if appErr != nil { - if err, ok := appErr.(rpcError); ok { - statusCode = err.code - statusDesc = err.desc - } else { - statusCode = convertCode(appErr) - statusDesc = appErr.Error() + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + switch err := err.(type) { + case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + default: + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } - return - } - opts := &transport.Options{ - Last: true, - Delay: false, + } - if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil { - if _, ok := err.(transport.ConnectionError); ok { - return - } - if e, ok := err.(transport.StreamError); ok { - statusCode = e.Code - statusDesc = e.Desc - } else { - statusCode = codes.Unknown - statusDesc = err.Error() + return err + } + statusCode := codes.OK + statusDesc := "" + df := func(v interface{}) error { + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + return err } } - t.WriteStatus(stream, statusCode, statusDesc) - default: - panic(fmt.Sprintf("payload format to be supported: %d", pf)) + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df) + if appErr != nil { + if err, ok := appErr.(rpcError); ok { + statusCode = err.code + statusDesc = err.desc + } else { + statusCode = convertCode(appErr) + statusDesc = appErr.Error() + } + if trInfo != nil && statusCode != codes.OK { + trInfo.tr.LazyLog(stringer(statusDesc), true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + return err + } + return nil + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: + statusCode = codes.Unknown + statusDesc = err.Error() + } + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } + return t.WriteStatus(stream, statusCode, statusDesc) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) { +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } ss := &serverStream{ - t: t, - s: stream, - p: &parser{s: stream}, - codec: s.opts.codec, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + trInfo: trInfo, + } + if ss.cp != nil { + ss.cbuf = new(bytes.Buffer) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + }() } if appErr := sd.Handler(srv.server, ss); appErr != nil { if err, ok := appErr.(rpcError); ok { ss.statusCode = err.code ss.statusDesc = err.desc + } else if err, ok := appErr.(transport.StreamError); ok { + ss.statusCode = err.Code + ss.statusDesc = err.Desc } else { ss.statusCode = convertCode(appErr) ss.statusDesc = appErr.Error() } } - t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + if trInfo != nil { + ss.mu.Lock() + if ss.statusCode != codes.OK { + ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true) + ss.trInfo.tr.SetError() + } else { + ss.trInfo.tr.LazyLog(stringer("OK"), false) + } + ss.mu.Unlock() + } + return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } + if trInfo != nil { + trInfo.tr.Finish() + } return } service := sm[:pos] method := sm[pos+1:] srv, ok := s.m[service] if !ok { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } + if trInfo != nil { + trInfo.tr.Finish() + } return } // Unary RPC or Streaming RPC? 
if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md) + s.processUnaryRPC(t, stream, srv, md, trInfo) return } if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd) + s.processStreamingRPC(t, stream, srv, sd, trInfo) return } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) + trInfo.tr.SetError() + } if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) } + if trInfo != nil { + trInfo.tr.Finish() + } } -// Stop stops the gRPC server. Once Stop returns, the server stops accepting -// connection requests and closes all the connected connections. +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. func (s *Server) Stop() { s.mu.Lock() listeners := s.lis @@ -372,22 +678,39 @@ func (s *Server) Stop() { cs := s.conns s.conns = nil s.mu.Unlock() + for lis := range listeners { lis.Close() } for c := range cs { c.Close() } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +func init() { + internal.TestingCloseConns = func(arg interface{}) { + arg.(*Server).testingCloseConns() + } + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } } -// TestingCloseConns closes all exiting transports but keeps s.lis accepting new -// connections. This is for test only now. -func (s *Server) TestingCloseConns() { +// testingCloseConns closes all existing transports but keeps s.lis +// accepting new connections. +func (s *Server) testingCloseConns() { s.mu.Lock() for c := range s.conns { c.Close() + delete(s.conns, c) } - s.conns = make(map[transport.ServerTransport]bool) s.mu.Unlock() } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go index 31d88e3cd139..dba7f6c4204d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go @@ -34,8 +34,10 @@ package grpc import ( + "bytes" "errors" "io" + "sync" "time" "golang.org/x/net/context" @@ -69,7 +71,7 @@ type Stream interface { SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On - // any other error, it aborts the streama nd returns an RPC status. On + // any other error, it aborts the stream and returns an RPC status. On // server side, it simply returns the error to the caller. RecvMsg(m interface{}) error } @@ -95,45 +97,84 @@ type ClientStream interface { // NewClientStream creates a new Stream for the client side. This is called // by generated code. 
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + var ( + t transport.ClientTransport + err error + ) + t, err = cc.dopts.picker.Pick(ctx) + if err != nil { + return nil, toRPCErr(err) + } // TODO(zhaoq): CallOption is omitted. Add support when it is needed. callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, + Flush: desc.ServerStreams && desc.ClientStreams, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() } cs := &clientStream{ - desc: desc, - codec: cc.dopts.codec, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + tracing: EnableTracing, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cs.cbuf = new(bytes.Buffer) } - if EnableTracing { - cs.traceInfo.tr = trace.New("Sent."+methodFamily(method), method) - cs.traceInfo.firstLine.client = true + if cs.tracing { + cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + cs.trInfo.firstLine.client = true if deadline, ok := ctx.Deadline(); ok { - cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + cs.trInfo.firstLine.deadline = deadline.Sub(time.Now()) } - cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false) - } - t, _, err := cc.wait(ctx, 0) - if err != nil { - return nil, toRPCErr(err) + cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false) + ctx = trace.NewContext(ctx, cs.trInfo.tr) } s, err := t.NewStream(ctx, callHdr) if err != nil { + cs.finish(err) return nil, toRPCErr(err) } cs.t = t cs.s = s - cs.p = &parser{s: s} + cs.p = &parser{r: s} + // Listen on ctx.Done() to detect cancellation when there is no pending + // I/O operations on this stream. + go func() { + select { + case <-t.Error(): + // Incur transport error, simply exit. + case <-s.Context().Done(): + err := s.Context().Err() + cs.finish(err) + cs.closeTransportStream(transport.ContextErr(err)) + } + }() return cs, nil } // clientStream implements a client side Stream. type clientStream struct { - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - traceInfo traceInfo + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + cbuf *bytes.Buffer + dc Decompressor + + tracing bool // set to EnableTracing when the clientStream is created. + + mu sync.Mutex + closed bool + // trInfo.tr is set when the clientStream is created (if EnableTracing is true), + // and is set to nil when the clientStream's finish method is called. 
+ trInfo traceInfo } func (cs *clientStream) Context() context.Context { @@ -144,7 +185,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { m, err := cs.s.Header() if err != nil { if _, ok := err.(transport.ConnectionError); !ok { - cs.t.CloseStream(cs.s, err) + cs.closeTransportStream(err) } } return m, err @@ -155,16 +196,31 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) SendMsg(m interface{}) (err error) { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + cs.mu.Unlock() + } defer func() { + if err != nil { + cs.finish(err) + } if err == nil || err == io.EOF { return } if _, ok := err.(transport.ConnectionError); !ok { - cs.t.CloseStream(cs.s, err) + cs.closeTransportStream(err) } err = toRPCErr(err) }() - out, err := encode(cs.codec, m, compressionNone) + out, err := encode(cs.codec, m, cs.cp, cs.cbuf) + defer func() { + if cs.cbuf != nil { + cs.cbuf.Reset() + } + }() if err != nil { return transport.StreamErrorf(codes.Internal, "grpc: %v", err) } @@ -172,29 +228,33 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } func (cs *clientStream) RecvMsg(m interface{}) (err error) { - err = recv(cs.p, cs.codec, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) defer func() { // err != nil indicates the termination of the stream. - if EnableTracing && err != nil { - if err != io.EOF { - cs.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - cs.traceInfo.tr.SetError() - } - cs.traceInfo.tr.Finish() + if err != nil { + cs.finish(err) } }() if err == nil { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + cs.mu.Unlock() + } if !cs.desc.ClientStreams || cs.desc.ServerStreams { return } // Special handling for client streaming rpc. 
- err = recv(cs.p, cs.codec, m) - cs.t.CloseStream(cs.s, err) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) + cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { if cs.s.StatusCode() == codes.OK { + cs.finish(err) return nil } return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) @@ -202,7 +262,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { return toRPCErr(err) } if _, ok := err.(transport.ConnectionError); !ok { - cs.t.CloseStream(cs.s, err) + cs.closeTransportStream(err) } if err == io.EOF { if cs.s.StatusCode() == codes.OK { @@ -216,16 +276,50 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { func (cs *clientStream) CloseSend() (err error) { err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + defer func() { + if err != nil { + cs.finish(err) + } + }() if err == nil || err == io.EOF { return } if _, ok := err.(transport.ConnectionError); !ok { - cs.t.CloseStream(cs.s, err) + cs.closeTransportStream(err) } err = toRPCErr(err) return } +func (cs *clientStream) closeTransportStream(err error) { + cs.mu.Lock() + if cs.closed { + cs.mu.Unlock() + return + } + cs.closed = true + cs.mu.Unlock() + cs.t.CloseStream(cs.s, err) +} + +func (cs *clientStream) finish(err error) { + if !cs.tracing { + return + } + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.trInfo.tr != nil { + if err == nil || err == io.EOF { + cs.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + cs.trInfo.tr.LazyPrintf("RPC: [%v]", err) + cs.trInfo.tr.SetError() + } + cs.trInfo.tr.Finish() + cs.trInfo.tr = nil + } +} + // ServerStream defines the interface a server stream has to satisfy. type ServerStream interface { // SendHeader sends the header metadata. It should not be called @@ -244,8 +338,14 @@ type serverStream struct { s *transport.Stream p *parser codec Codec + cp Compressor + dc Decompressor + cbuf *bytes.Buffer statusCode codes.Code statusDesc string + trInfo *traceInfo + + mu sync.Mutex // protects trInfo.tr after the service handler runs. 
} func (ss *serverStream) Context() context.Context { @@ -264,8 +364,27 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { return } -func (ss *serverStream) SendMsg(m interface{}) error { - out, err := encode(ss.codec, m, compressionNone) +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + }() + out, err := encode(ss.codec, m, ss.cp, ss.cbuf) + defer func() { + if ss.cbuf != nil { + ss.cbuf.Reset() + } + }() if err != nil { err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) return err @@ -273,6 +392,20 @@ func (ss *serverStream) SendMsg(m interface{}) error { return ss.t.Write(ss.s, out, &transport.Options{Last: false}) } -func (ss *serverStream) RecvMsg(m interface{}) error { - return recv(ss.p, ss.codec, m) +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + }() + return recv(ss.p, ss.codec, ss.s, ss.dc, m) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go index f8df478dfc1d..cde04fbfc9e6 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go @@ -93,12 +93,17 @@ func (f *firstLine) String() string { // payload represents an RPC request or response payload. type payload struct { - m interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } func (p payload) String() string { - return fmt.Sprint(p.m) + if p.sent { + return fmt.Sprintf("sent: %v", p.msg) + } else { + return fmt.Sprintf("recv: %v", p.msg) + } } type fmtStringer struct { @@ -109,3 +114,7 @@ type fmtStringer struct { func (f *fmtStringer) String() string { return fmt.Sprintf(f.format, f.a...) 
} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go index 306b5851828e..f6b38a5a6d27 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go @@ -37,7 +37,7 @@ import ( "fmt" "sync" - "github.com/bradfitz/http2" + "golang.org/x/net/http2" ) const ( @@ -61,8 +61,8 @@ func (windowUpdate) isItem() bool { } type settings struct { - ack bool - setting []http2.Setting + ack bool + ss []http2.Setting } func (settings) isItem() bool { @@ -86,7 +86,8 @@ func (flushIO) isItem() bool { } type ping struct { - ack bool + ack bool + data [8]byte } func (ping) isItem() bool { @@ -104,8 +105,14 @@ type quotaPool struct { // newQuotaPool creates a quotaPool which has quota q available to consume. func newQuotaPool(q int) *quotaPool { - qb := "aPool{c: make(chan int, 1)} - qb.c <- q + qb := "aPool{ + c: make(chan int, 1), + } + if q > 0 { + qb.c <- q + } else { + qb.quota = q + } return qb } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go new file mode 100644 index 000000000000..d7e18a0b6588 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go @@ -0,0 +1,377 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := timeoutDecode(v) + if err != nil { + return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + var metakv []string + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) { + continue + } + for _, v := range vv { + if k == "user-agent" { + // user-agent is special. Copying logic of http_util.go. + if i := strings.LastIndex(v, " "); i == -1 { + // There is no application user agent string being set + continue + } else { + v = v[:i] + } + } + metakv = append(metakv, k, v) + + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. 
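
NewServerHandlerTransport only accepts HTTP/2 POST requests with an application/grpc content type on a ResponseWriter that supports Flusher and CloseNotifier, and the diff also asserts that *grpc.Server satisfies http.Handler. A sketch of mounting a gRPC server on a plain net/http server under those constraints; the address and certificate paths are placeholders, and recent Go releases negotiate h2 automatically over TLS.

```go
package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer() // register services here in a real program

	srv := &http.Server{
		Addr:    ":8443", // placeholder
		Handler: gs,      // *grpc.Server implements http.Handler per this change
	}
	// ListenAndServeTLS negotiates HTTP/2, which the handler transport requires;
	// plaintext HTTP/1.1 requests are rejected with "gRPC requires HTTP/2".
	log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key")) // placeholder paths
}
```
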
+type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) + if statusDesc != "" { + h.Set("Grpc-Message", statusDesc) + } + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + for _, v := range vv { + // http2 ResponseWriter mechanism to + // send undeclared Trailers after the + // headers have possibly been written. + h.Add(http2.TrailerPrefix+k, v) + } + } + } + }) + close(ht.writes) + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", "application/grpc") + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(data) + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + for _, v := range vv { + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + var ctx context.Context + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. 
+ requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + return + case <-ht.closedCh: + case <-clientGone: + } + cancel() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + windowHandler: func(int) {}, // nothing + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{*req.TLS} + } + ctx = metadata.NewContext(ctx, ht.headerMD) + ctx = peer.NewContext(ctx, pr) + s.ctx = newContextWithStream(ctx, s) + s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + for { + buf := make([]byte, 1024) // TODO: minimize garbage, optimize recvBuffer code/ownership + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(&recvMsg{data: buf[:n]}) + } + if err != nil { + s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) + return + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return ConnectionError{Desc: err.Error()} +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go index 169a80ddd813..66fabbba7d28 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go @@ -39,23 +39,27 @@ import ( "io" "math" "net" + "strings" "sync" "time" - "github.com/bradfitz/http2" - "github.com/bradfitz/http2/hpack" "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) // http2Client implements the ClientTransport interface with HTTP2. 
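
serverHandlerTransport.do and runStream serialize every header, data, and status write onto the single ServeHTTP goroutine by shipping closures over the writes channel, with closedCh as the escape hatch for a dying connection. The same pattern in isolation, with made-up names (funcRunner and friends are not part of the vendored code):

```go
package main

import (
	"fmt"
	"sync"
)

// funcRunner mimics the writes/closedCh pair: callers submit closures,
// one goroutine runs them in order, and close unblocks pending senders.
type funcRunner struct {
	writes    chan func()
	closedCh  chan struct{}
	closeOnce sync.Once
}

func newFuncRunner() *funcRunner {
	return &funcRunner{writes: make(chan func()), closedCh: make(chan struct{})}
}

// do submits fn to the owning goroutine, analogous to serverHandlerTransport.do.
func (r *funcRunner) do(fn func()) error {
	select {
	case r.writes <- fn:
		return nil
	case <-r.closedCh:
		return fmt.Errorf("runner closing")
	}
}

// run drains the channel on the owning goroutine, analogous to runStream.
func (r *funcRunner) run() {
	for {
		select {
		case fn, ok := <-r.writes:
			if !ok {
				return
			}
			fn()
		case <-r.closedCh:
			return
		}
	}
}

func (r *funcRunner) close() { r.closeOnce.Do(func() { close(r.closedCh) }) }

func main() {
	r := newFuncRunner()
	done := make(chan struct{})
	go func() { r.run(); close(done) }()
	r.do(func() { fmt.Println("write 1") })
	r.do(func() { fmt.Println("write 2") })
	r.close()
	<-done
}
```
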
type http2Client struct { - target string // server name/addr - conn net.Conn // underlying communication channel - nextID uint32 // the next stream ID to be used + target string // server name/addr + userAgent string + conn net.Conn // underlying communication channel + authInfo credentials.AuthInfo // auth info about the connection + nextID uint32 // the next stream ID to be used // writableChan synchronizes write access to the transport. // A writer acquires the write lock by sending a value on writableChan @@ -113,6 +117,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e if connErr != nil { return nil, ConnectionErrorf("transport: %v", connErr) } + var authInfo credentials.AuthInfo for _, c := range opts.AuthOptions { if ccreds, ok := c.(credentials.TransportAuthenticator); ok { scheme = "https" @@ -123,7 +128,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e if timeout > 0 { timeout -= time.Since(startT) } - conn, connErr = ccreds.ClientHandshake(addr, conn, timeout) + conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) break } } @@ -158,10 +163,16 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e return nil, ConnectionErrorf("transport: %v", err) } } + ua := primaryUA + if opts.UserAgent != "" { + ua = opts.UserAgent + " " + ua + } var buf bytes.Buffer t := &http2Client{ - target: addr, - conn: conn, + target: addr, + userAgent: ua, + conn: conn, + authInfo: authInfo, // The client initiated stream id is odd starting from 1. nextID: 1, writableChan: make(chan int, 1), @@ -190,7 +201,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr, sq bool) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { fc := &inFlow{ limit: initialWindowSize, conn: t.fc, @@ -199,8 +210,8 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr, sq bool) s := &Stream{ id: t.nextID, method: callHdr.Method, + sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), - updateStreams: sq, fc: fc, sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), headerChan: make(chan struct{}), @@ -229,9 +240,30 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ContextErr(context.DeadlineExceeded) } } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + ctx = peer.NewContext(ctx, pr) authData := make(map[string]string) for _, c := range t.authCreds { - data, err := c.GetRequestMetadata(ctx) + // Construct URI required to get auth request metadata. + var port string + if pos := strings.LastIndex(t.target, ":"); pos != -1 { + // Omit port if it is the default one. 
+ if t.target[pos+1:] != "443" { + port = ":" + t.target[pos+1:] + } + } + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + } + audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + data, err := c.GetRequestMetadata(ctx, audience) if err != nil { return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) } @@ -261,9 +293,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, err } t.mu.Lock() - s := t.newStream(ctx, callHdr, checkStreamsQuota) + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) t.activeStreams[s.id] = s + + // This stream is not counted when applySetings(...) initialize t.streamsQuota. + // Reset t.streamsQuota to the right value. + var reset bool + if !checkStreamsQuota && t.streamsQuota != nil { + reset = true + } t.mu.Unlock() + if reset { + t.streamsQuota.reset(-1) + } + // HPACK encodes various headers. Note that once WriteField(...) is // called, the corresponding headers/continuation frame has to be sent // because hpack.Encoder is stateful. @@ -273,7 +320,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + + if callHdr.SendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } if timeout > 0 { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) } @@ -287,7 +339,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if md, ok := metadata.FromContext(ctx); ok { hasMD = true for k, v := range md { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } } first := true @@ -299,6 +353,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } else { endHeaders = true } + var flush bool + if endHeaders && (hasMD || callHdr.Flush) { + flush = true + } if first { // Sends a HeadersFrame to server to start a new stream. p := http2.HeadersFrameParam{ @@ -310,11 +368,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // Do a force flush for the buffered frames iff it is the last headers frame // and there is header metadata to be sent. Otherwise, there is flushing until // the corresponding data frame is written. - err = t.framer.writeHeaders(hasMD && endHeaders, p) + err = t.framer.writeHeaders(flush, p) first = false } else { // Sends Continuation frames for the leftover headers. - err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size)) + err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) } if err != nil { t.notifyError(err) @@ -328,12 +386,21 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea // CloseStream clears the footprint of a stream when the stream is not needed any more. // This must not be executed in reader's goroutine. 
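The audience string assembled in `NewStream` above (scheme plus host, a non-default port taken from the dial target, and the method path with the method name trimmed off) is what per-RPC credentials receive in `GetRequestMetadata`. A standalone sketch of that derivation, with made-up target, host, and method values:

```go
package main

import (
	"fmt"
	"strings"
)

// audienceFor mirrors the derivation above: take the port from the dial
// target (dropping the default 443) and trim the method name from the
// full method path so only the service part remains.
func audienceFor(target, host, method string) (string, error) {
	var port string
	if pos := strings.LastIndex(target, ":"); pos != -1 && target[pos+1:] != "443" {
		port = ":" + target[pos+1:]
	}
	pos := strings.LastIndex(method, "/")
	if pos == -1 {
		return "", fmt.Errorf("malformed method name: %q", method)
	}
	return "https://" + host + port + method[:pos], nil
}

func main() {
	// Hypothetical values; the real ones come from the dial target and CallHdr.
	aud, err := audienceFor("example.com:8443", "example.com", "/grpc.health.v1.Health/Check")
	fmt.Println(aud, err) // https://example.com:8443/grpc.health.v1.Health <nil>
}
```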
func (t *http2Client) CloseStream(s *Stream, err error) { + var updateStreams bool t.mu.Lock() + if t.streamsQuota != nil { + updateStreams = true + } delete(t.activeStreams, s.id) t.mu.Unlock() - if s.updateStreams { + if updateStreams { t.streamsQuota.add(1) } + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), the caller needs + // to call cancel on the stream to interrupt the blocking on + // other goroutines. + s.cancel() s.mu.Lock() if q := s.fc.restoreConn(); q > 0 { t.controlBuf.put(&windowUpdate{0, q}) @@ -348,11 +415,6 @@ func (t *http2Client) CloseStream(s *Stream, err error) { } s.state = streamDone s.mu.Unlock() - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), the caller needs - // to call cancel on the stream to interrupt the blocking on - // other goroutines. - s.cancel() if _, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) } @@ -488,14 +550,8 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - if s, ok := t.activeStreams[f.Header().StreamID]; ok { - return s, true - } - return nil, false + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok } // updateWindow adjusts the inbound quota for the stream and the transport. @@ -518,30 +574,46 @@ func (t *http2Client) handleData(f *http2.DataFrame) { return } size := len(f.Data()) - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - t.notifyError(err) + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + t.notifyError(err) + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.statusCode = codes.Internal + s.statusDesc = err.Error() + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return + if s.state == streamWriteDone { + s.state = streamDone + } else { + s.state = streamReadDone } - s.state = streamDone s.statusCode = codes.Internal - s.statusDesc = err.Error() + s.statusDesc = "server closed the stream without sending trailers" s.mu.Unlock() s.write(recvMsg{err: io.EOF}) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? 
- data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) } func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { @@ -555,7 +627,11 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { return } s.state = streamDone - s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) } @@ -567,48 +643,23 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } + var ss []http2.Setting f.ForeachSetting(func(s http2.Setting) error { - if v, ok := f.Value(s.ID); ok { - switch s.ID { - case http2.SettingMaxConcurrentStreams: - // TODO(zhaoq): This is a hack to avoid significant refactoring of the - // code to deal with the unrealistic int32 overflow. Probably will try - // to find a better way to handle this later. - if v > math.MaxInt32 { - v = math.MaxInt32 - } - t.mu.Lock() - reset := t.streamsQuota != nil - if !reset { - t.streamsQuota = newQuotaPool(int(v)) - } - ms := t.maxStreams - t.maxStreams = int(v) - t.mu.Unlock() - if reset { - t.streamsQuota.reset(int(v) - ms) - } - case http2.SettingInitialWindowSize: - t.mu.Lock() - for _, s := range t.activeStreams { - // Adjust the sending quota for each s. - s.sendQuotaPool.reset(int(v - t.streamSendQuota)) - } - t.streamSendQuota = v - t.mu.Unlock() - } - } + ss = append(ss, s) return nil }) - t.controlBuf.put(&settings{ack: true}) + // The settings will be applied once the ack is sent. + t.controlBuf.put(&settings{ack: true, ss: ss}) } func (t *http2Client) handlePing(f *http2.PingFrame) { - t.controlBuf.put(&ping{true}) + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - // TODO(zhaoq): GoAwayFrame handler to be implemented" + // TODO(zhaoq): GoAwayFrame handler to be implemented } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -623,52 +674,59 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). -func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeClientHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return } - if err != nil { - s.write(recvMsg{err: err}) - // Something wrong. Stops reading even when there is remaining. - return nil + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) } - if !endHeaders { - return s + if state.err != nil { + s.write(recvMsg{err: state.err}) + // Something wrong. Stops reading even when there is remaining. 
+ return } + endStream := frame.StreamEnded() + s.mu.Lock() + if !endStream { + s.recvCompress = state.encoding + } if !s.headerDone { - if !endStream && len(hDec.state.mdata) > 0 { - s.header = hDec.state.mdata + if !endStream && len(state.mdata) > 0 { + s.header = state.mdata } close(s.headerChan) s.headerDone = true } if !endStream || s.state == streamDone { s.mu.Unlock() - return nil + return } - if len(hDec.state.mdata) > 0 { - s.trailer = hDec.state.mdata + if len(state.mdata) > 0 { + s.trailer = state.mdata } s.state = streamDone - s.statusCode = hDec.state.statusCode - s.statusDesc = hDec.state.statusDesc + s.statusCode = state.statusCode + s.statusDesc = state.statusDesc s.mu.Unlock() s.write(recvMsg{err: io.EOF}) - return nil +} + +func handleMalformedHTTP2(s *Stream, err http2.StreamError) { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: StreamErrorf(http2ErrConvTab[err.Code], "%v", err)}) } // reader runs as a separate goroutine in charge of reading data from network @@ -691,25 +749,30 @@ func (t *http2Client) reader() { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.readFrame() if err != nil { - t.notifyError(err) - return + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + handleMalformedHTTP2(s, se) + } + continue + } else { + // Transport error. + t.notifyError(err) + return + } } switch frame := frame.(type) { - case *http2.HeadersFrame: - // operateHeaders has to be invoked regardless the value of curStream - // because the HPACK decoder needs to be updated using the received - // headers. - curStream, _ = t.getStream(frame) - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, false) + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -728,6 +791,39 @@ func (t *http2Client) reader() { } } +func (t *http2Client) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + // TODO(zhaoq): This is a hack to avoid significant refactoring of the + // code to deal with the unrealistic int32 overflow. Probably will try + // to find a better way to handle this later. + if s.Val > math.MaxInt32 { + s.Val = math.MaxInt32 + } + t.mu.Lock() + reset := t.streamsQuota != nil + if !reset { + t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) + } + ms := t.maxStreams + t.maxStreams = int(s.Val) + t.mu.Unlock() + if reset { + t.streamsQuota.reset(int(s.Val) - ms) + } + case http2.SettingInitialWindowSize: + t.mu.Lock() + for _, stream := range t.activeStreams { + // Adjust the sending quota for each stream. + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + } +} + // controller running in a separate goroutine takes charge of sending control // frames (e.g., window update, reset stream, setting, etc.) to the server. 
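Both transports now follow the same pattern for SETTINGS: the frame handler only records the incoming values and queues an ack, and the controller goroutine applies them after the ack has been written, so quota changes never race with the write path. A toy sketch of that hand-off, using a plain buffered channel in place of the real control buffer and only two of the settings:

```go
package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// settingsItem is a simplified stand-in for the transport's *settings item.
type settingsItem struct {
	ack bool
	ss  []http2.Setting
}

type transport struct {
	controlBuf      chan settingsItem
	maxStreams      int
	streamSendQuota uint32
}

// handleSettings only records the settings; nothing is applied yet.
func (t *transport) handleSettings(ss []http2.Setting) {
	t.controlBuf <- settingsItem{ack: true, ss: ss}
}

// controller drains the control queue, "writes" the ack, then applies.
func (t *transport) controller() {
	for item := range t.controlBuf {
		if item.ack {
			fmt.Println("wrote SETTINGS ack")
			t.applySettings(item.ss)
		}
	}
}

func (t *transport) applySettings(ss []http2.Setting) {
	for _, s := range ss {
		switch s.ID {
		case http2.SettingMaxConcurrentStreams:
			t.maxStreams = int(s.Val)
		case http2.SettingInitialWindowSize:
			t.streamSendQuota = s.Val
		}
	}
}

func main() {
	t := &transport{controlBuf: make(chan settingsItem, 1)}
	t.handleSettings([]http2.Setting{
		{ID: http2.SettingMaxConcurrentStreams, Val: 100},
		{ID: http2.SettingInitialWindowSize, Val: 65535},
	})
	close(t.controlBuf)
	t.controller()
	fmt.Println(t.maxStreams, t.streamSendQuota)
}
```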
func (t *http2Client) controller() { @@ -743,17 +839,16 @@ func (t *http2Client) controller() { case *settings: if i.ack { t.framer.writeSettingsAck(true) + t.applySettings(i.ss) } else { - t.framer.writeSettings(true, i.setting...) + t.framer.writeSettings(true, i.ss...) } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) case *flushIO: t.framer.flushWrite() case *ping: - // TODO(zhaoq): Ack with all-0 data now. will change to some - // meaningful content when this is actually in use. - t.framer.writePing(true, i.ack, [8]byte{}) + t.framer.writePing(true, i.ack, i.data) default: grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go index d9c0e11174a4..cec441cfc7a6 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go @@ -42,12 +42,14 @@ import ( "strconv" "sync" - "github.com/bradfitz/http2" - "github.com/bradfitz/http2/hpack" "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) // ErrIllegalHeaderWrite indicates that setting header is illegal because of @@ -57,10 +59,11 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { conn net.Conn - maxStreamID uint32 // max stream ID ever seen + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. + // A writer acquires the write lock by receiving a value on writableChan + // and releases it by sending on writableChan. writableChan chan int // shutdownChan is closed when Close is called. // Blocking operations should select on shutdownChan to avoid @@ -88,11 +91,9 @@ type http2Server struct { // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. -func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err error) { +func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { framer := newFramer(conn) // Send initial settings as connection preface to client. - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. var settings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. @@ -116,6 +117,7 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err er var buf bytes.Buffer t := &http2Server{ conn: conn, + authInfo: authInfo, framer: framer, hBuf: &buf, hEnc: hpack.NewEncoder(&buf), @@ -134,43 +136,73 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err er return t, nil } -// operateHeader takes action on the decoded headers. 
It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). -func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream), wg *sync.WaitGroup) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeServerHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) { + buf := newRecvBuffer() + fc := &inFlow{ + limit: initialWindowSize, + conn: t.fc, + } + s := &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: fc, } - if err != nil { - grpclog.Printf("transport: http2Server.operateHeader found %v", err) + + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if err := state.err; err != nil { if se, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) } - return nil + return } - if endStream { + + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if !endHeaders { - return s + s.recvCompress = state.encoding + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(context.TODO()) + } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Cache the current stream to the context so that the server application + // can find out. Required when the server wants to send some metadata + // back to the client (unary call only). + s.ctx = newContextWithStream(s.ctx, s) + // Attach the received metadata to the context. + if len(state.mdata) > 0 { + s.ctx = metadata.NewContext(s.ctx, state.mdata) } + + s.dec = &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + } + s.recvCompress = state.encoding + s.method = state.method t.mu.Lock() if t.state != reachable { t.mu.Unlock() - return nil + return } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) - return nil + return } s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) t.activeStreams[s.id] = s @@ -178,32 +210,7 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header s.windowHandler = func(n int) { t.updateWindow(s, uint32(n)) } - if hDec.state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout) - } else { - s.ctx, s.cancel = context.WithCancel(context.TODO()) - } - // Cache the current stream to the context so that the server application - // can find out. Required when the server wants to send some metadata - // back to the client (unary call only). - s.ctx = newContextWithStream(s.ctx, s) - // Attach the received metadata to the context. - if len(hDec.state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata) - } - - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, - } - s.method = hDec.state.method - - wg.Add(1) - go func() { - handle(s) - wg.Done() - }() - return nil + handle(s) } // HandleStreams receives incoming streams using the given handler. 
This is @@ -236,10 +243,6 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream - var wg sync.WaitGroup - defer wg.Wait() for { frame, err := t.framer.readFrame() if err != nil { @@ -247,7 +250,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { return } switch frame := frame.(type) { - case *http2.HeadersFrame: + case *http2.MetaHeadersFrame: id := frame.Header().StreamID if id%2 != 1 || id <= t.maxStreamID { // illegal gRPC stream id. @@ -256,21 +259,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { break } t.maxStreamID = id - buf := newRecvBuffer() - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } - curStream = &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: fc, - } - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle, &wg) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, false, handle, &wg) + t.operateHeaders(frame, handle) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -324,22 +313,24 @@ func (t *http2Server) handleData(f *http2.DataFrame) { return } size := len(f.Data()) - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - grpclog.Printf("transport: http2Server %v", err) - t.Close() + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } + t.closeStream(s) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } - t.closeStream(s) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) if f.Header().Flags.Has(http2.FlagDataEndStream) { // Received the end of stream from the client. s.mu.Lock() @@ -367,22 +358,19 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } + var ss []http2.Setting f.ForeachSetting(func(s http2.Setting) error { - if v, ok := f.Value(http2.SettingInitialWindowSize); ok { - t.mu.Lock() - defer t.mu.Unlock() - for _, s := range t.activeStreams { - s.sendQuotaPool.reset(int(v - t.streamSendQuota)) - } - t.streamSendQuota = v - } + ss = append(ss, s) return nil }) - t.controlBuf.put(&settings{ack: true}) + // The settings will be applied once the ack is sent. 
+ t.controlBuf.put(&settings{ack: true, ss: ss}) } func (t *http2Server) handlePing(f *http2.PingFrame) { - t.controlBuf.put(&ping{true}) + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) } func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -444,8 +432,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } for k, v := range md { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } if err := t.writeHeaders(s, t.hBuf, false); err != nil { return err @@ -459,17 +452,24 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { - s.mu.RLock() + var headersSent bool + s.mu.Lock() if s.state == streamDone { - s.mu.RUnlock() + s.mu.Unlock() return nil } - s.mu.RUnlock() + if s.headerOk { + headersSent = true + } + s.mu.Unlock() if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + if !headersSent { + t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + } t.hEnc.WriteField( hpack.HeaderField{ Name: "grpc-status", @@ -478,7 +478,9 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc}) // Attach the trailer metadata. for k, v := range s.trailer { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + for _, entry := range v { + t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) + } } if err := t.writeHeaders(s, t.hBuf, true); err != nil { t.Close() @@ -507,6 +509,9 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { t.hBuf.Reset() t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + if s.sendCompress != "" { + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } p := http2.HeadersFrameParam{ StreamID: s.id, BlockFragment: t.hBuf.Bytes(), @@ -584,6 +589,20 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + defer t.mu.Unlock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + } + + } +} + // controller running in a separate goroutine takes charge of sending control // frames (e.g., window update, reset stream, setting, etc.) to the server. func (t *http2Server) controller() { @@ -599,17 +618,16 @@ func (t *http2Server) controller() { case *settings: if i.ack { t.framer.writeSettingsAck(true) + t.applySettings(i.ss) } else { - t.framer.writeSettings(true, i.setting...) 
+ t.framer.writeSettings(true, i.ss...) } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) case *flushIO: t.framer.flushWrite() case *ping: - // TODO(zhaoq): Ack with all-0 data now. will change to some - // meaningful content when this is actually in use. - t.framer.writePing(true, i.ack, [8]byte{}) + t.framer.writePing(true, i.ack, i.data) default: grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) } @@ -639,9 +657,9 @@ func (t *http2Server) Close() (err error) { t.mu.Unlock() close(t.shutdownChan) err = t.conn.Close() - // Notify all active streams. + // Cancel all active streams. for _, s := range streams { - s.write(recvMsg{err: ErrConnClosing}) + s.cancel() } return } @@ -663,8 +681,11 @@ func (t *http2Server) closeStream(s *Stream) { s.state = streamDone s.mu.Unlock() // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), the caller needs - // to call cancel on the stream to interrupt the blocking on - // other goroutines. + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. s.cancel() } + +func (t *http2Server) RemoteAddr() net.Addr { + return t.conn.RemoteAddr() +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go index 2babe729c2d8..6aabcd4a362d 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go @@ -39,17 +39,20 @@ import ( "io" "net" "strconv" + "strings" "sync/atomic" "time" - "github.com/bradfitz/http2" - "github.com/bradfitz/http2/hpack" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" ) const ( + // The primary user agent + primaryUA = "grpc-go/0.11" // http2MaxFrameLen specifies the max length of a HTTP2 frame. 
http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues @@ -59,35 +62,37 @@ const ( ) var ( - clientPreface = []byte(http2.ClientPreface) + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.FailedPrecondition, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } ) -var http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ - http2.ErrCodeNo: codes.Internal, - http2.ErrCodeProtocol: codes.Internal, - http2.ErrCodeInternal: codes.Internal, - http2.ErrCodeFlowControl: codes.Internal, - http2.ErrCodeSettingsTimeout: codes.Internal, - http2.ErrCodeFrameSize: codes.Internal, - http2.ErrCodeRefusedStream: codes.Unavailable, - http2.ErrCodeCancel: codes.Canceled, - http2.ErrCodeCompression: codes.Internal, - http2.ErrCodeConnect: codes.Internal, - http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, - http2.ErrCodeInadequateSecurity: codes.PermissionDenied, -} - -var statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, // pick an arbitrary one which is matched. - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, -} - // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. type decodeState struct { + err error // first error encountered decoding + + encoding string // statusCode caches the stream status received from the trailer // the server sent. Client side only. statusCode codes.Code @@ -97,28 +102,14 @@ type decodeState struct { timeout time.Duration method string // key-value metadata map from the peer. - mdata map[string]string -} - -// An hpackDecoder decodes HTTP2 headers which may span multiple frames. -type hpackDecoder struct { - h *hpack.Decoder - state decodeState - err error // The err when decoding -} - -// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame. -type headerFrame interface { - Header() http2.FrameHeader - HeaderBlockFragment() []byte - HeadersEnded() bool + mdata map[string][]string } // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. 
func isReservedHeader(hdr string) bool { - if hdr[0] == ':' { + if hdr != "" && hdr[0] == ':' { return true } switch hdr { @@ -128,92 +119,69 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "te", - "user-agent": + "te": return true default: return false } } -func newHPACKDecoder() *hpackDecoder { - d := &hpackDecoder{} - d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { - switch f.Name { - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return +func (d *decodeState) setErr(err error) { + if d.err == nil { + d.err = err + } +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + if !strings.Contains(f.Value, "application/grpc") { + d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) + return + } + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) + return + } + d.statusCode = codes.Code(code) + case "grpc-message": + d.statusDesc = f.Value + case "grpc-timeout": + d.timeoutSet = true + var err error + d.timeout, err = timeoutDecode(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) + return + } + case ":path": + d.method = f.Value + default: + if !isReservedHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. + return + } + // Extract the application user agent string. 
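In the new `processHeaderField` path, the transport-appended suffix (for example `" grpc-go/0.11"`) is stripped from `user-agent` before the value is stored, and metadata now accumulates as `map[string][]string` so repeated keys are preserved instead of overwritten. A rough sketch of those two details, with invented header values:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	mdata := map[string][]string{}

	put := func(name, value string) {
		if name == "user-agent" {
			// Keep only the application part; drop the transport-appended suffix.
			i := strings.LastIndex(value, " ")
			if i == -1 {
				return // no application user agent was set
			}
			value = value[:i]
		}
		mdata[name] = append(mdata[name], value)
	}

	put("user-agent", "my-client/1.2 grpc-go/0.11") // hypothetical value
	put("x-custom", "a")
	put("x-custom", "b") // repeated keys are kept, not overwritten

	fmt.Println(mdata["user-agent"]) // [my-client/1.2]
	fmt.Println(mdata["x-custom"])   // [a b]
}
```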
+ f.Value = f.Value[:i] } - d.state.statusCode = codes.Code(code) - case "grpc-message": - d.state.statusDesc = f.Value - case "grpc-timeout": - d.state.timeoutSet = true - var err error - d.state.timeout, err = timeoutDecode(f.Value) + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err) + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) return } - case ":path": - d.state.method = f.Value - default: - if !isReservedHeader(f.Name) { - if d.state.mdata == nil { - d.state.mdata = make(map[string]string) - } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return - } - d.state.mdata[k] = v - } - } - }) - return d -} - -func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) - } - - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) - } - endHeaders = true - } - - if err == nil && d.err != nil { - err = d.err - } - return -} - -func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) - } - - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) + d.mdata[k] = append(d.mdata[k], v) } - endHeaders = true } - - if err == nil && d.err != nil { - err = d.err - } - return } type timeoutUnit uint8 @@ -304,10 +272,11 @@ type framer struct { func newFramer(conn net.Conn) *framer { f := &framer{ - reader: conn, + reader: bufio.NewReaderSize(conn, http2IOBufSize), writer: bufio.NewWriterSize(conn, http2IOBufSize), } f.fr = http2.NewFramer(f.writer, f.reader) + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } diff --git a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go index 498cee544bcc..f027cae55885 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go @@ -35,7 +35,7 @@ Package transport defines and implements message oriented communication channel to complete various transactions (e.g., an RPC). */ -package transport // import "google.golang.org/grpc/transport" +package transport import ( "bytes" @@ -47,6 +47,7 @@ import ( "time" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" @@ -169,17 +170,13 @@ type Stream struct { ctx context.Context cancel context.CancelFunc // method records the associated RPC method of the stream. 
- method string - buf *recvBuffer - dec io.Reader - - // updateStreams indicates whether the transport's streamsQuota needed - // to be updated when this stream is closed. It is false when the transport - // sticks to the initial infinite value of the number of concurrent streams. - // Ture otherwise. - updateStreams bool - fc *inFlow - recvQuota uint32 + method string + recvCompress string + sendCompress string + buf *recvBuffer + dec io.Reader + fc *inFlow + recvQuota uint32 // The accumulated inbound quota pending for window update. updateQuota uint32 // The handler to control the window update procedure for both this @@ -206,6 +203,17 @@ type Stream struct { statusDesc string } +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no // header metadata or iii) the stream is cancelled/expired. @@ -238,6 +246,11 @@ func (s *Stream) Context() context.Context { return s.ctx } +// TraceContext recreates the context of s with a trace.Trace. +func (s *Stream) TraceContext(tr trace.Trace) { + s.ctx = trace.NewContext(s.ctx, tr) +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -286,20 +299,18 @@ func (s *Stream) Read(p []byte) (n int, err error) { return } -type key int - // The key to save transport.Stream in the context. -const streamKey = key(0) +type streamKey struct{} // newContextWithStream creates a new context from ctx and attaches stream // to it. func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey, stream) + return context.WithValue(ctx, streamKey{}, stream) } // StreamFromContext returns the stream saved in ctx. func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey).(*Stream) + s, ok = ctx.Value(streamKey{}).(*Stream) return } @@ -314,15 +325,20 @@ const ( // NewServerTransport creates a ServerTransport with conn or non-nil error // if it fails. -func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32) (ServerTransport, error) { - return newHTTP2Server(conn, maxStreams) +func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) { + return newHTTP2Server(conn, maxStreams, authInfo) } // ConnectOptions covers all relevant options for dialing a server. type ConnectOptions struct { - Dialer func(string, time.Duration) (net.Conn, error) + // UserAgent is the application user agent. + UserAgent string + // Dialer specifies how to dial a network address. + Dialer func(string, time.Duration) (net.Conn, error) + // AuthOptions stores the credentials required to setup a client connection and/or issue RPCs. AuthOptions []credentials.Credentials - Timeout time.Duration + // Timeout specifies the timeout for dialing a client connection. 
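transport.go above also swaps the integer context key for an unexported empty-struct key (`streamKey struct{}`), which cannot collide with values stored by any other package. The same pattern in isolation, using the standard library `context` for brevity where the vendored code uses `golang.org/x/net/context`:

```go
package main

import (
	"context"
	"fmt"
)

// streamKey is unexported and has its own type, so no other package can
// construct a colliding key for context.WithValue.
type streamKey struct{}

func withStream(ctx context.Context, s string) context.Context {
	return context.WithValue(ctx, streamKey{}, s)
}

func streamFrom(ctx context.Context) (string, bool) {
	s, ok := ctx.Value(streamKey{}).(string)
	return s, ok
}

func main() {
	ctx := withStream(context.Background(), "stream-42") // placeholder value
	fmt.Println(streamFrom(ctx))
}
```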
+ Timeout time.Duration } // NewClientTransport establishes the transport with the required ConnectOptions @@ -334,20 +350,40 @@ func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, e // Options provides additional hints and information for message // transmission. type Options struct { - // Indicate whether it is the last piece for this stream. + // Last indicates whether this write is the last piece for + // this stream. Last bool - // The hint to transport impl whether the data could be buffered for - // batching write. Transport impl can feel free to ignore it. + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // Transport implementation may ignore the hint. Delay bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { - Host string // peer host - Method string // the operation to perform on the specified host + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // RecvCompress specifies the compression algorithm applied on + // inbound messages. + RecvCompress string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. The transport may modify the flush decision + // for performance purposes. + Flush bool } -// ClientTransport is the common interface for all gRPC client side transport +// ClientTransport is the common interface for all gRPC client-side transport // implementations. type ClientTransport interface { // Close tears down this transport. Once it returns, the transport @@ -376,21 +412,35 @@ type ClientTransport interface { Error() <-chan struct{} } -// ServerTransport is the common interface for all gRPC server side transport +// ServerTransport is the common interface for all gRPC server-side transport // implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. type ServerTransport interface { - // WriteStatus sends the status of a stream to the client. - WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error - // Write sends the data for the given stream. - Write(s *Stream, data []byte, opts *Options) error - // WriteHeader sends the header metedata for the given stream. - WriteHeader(s *Stream, md metadata.MD) error // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. + // WriteStatus is the final call made on a stream and always + // occurs. + WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error + // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error + + // RemoteAddr returns the remote network address. 
+ RemoteAddr() net.Addr } // StreamErrorf creates an StreamError with the specified error code and description. diff --git a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go index 450d15c28f5f..cafe23366bf4 100644 --- a/Godeps/_workspace/src/github.com/docker/distribution/version/version.go +++ b/Godeps/_workspace/src/github.com/docker/distribution/version/version.go @@ -8,4 +8,4 @@ var Package = "github.com/docker/distribution" // the latest release tag by hand, always suffixed by "+unknown". During // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. -var Version = "v2.1.0+unknown" +var Version = "v2.4.1+unknown" diff --git a/api/swagger-spec/oapi-v1.json b/api/swagger-spec/oapi-v1.json index bc3b2dd4c355..8473c10a6eda 100644 --- a/api/swagger-spec/oapi-v1.json +++ b/api/swagger-spec/oapi-v1.json @@ -22195,6 +22195,21 @@ "$ref": "v1.ImageSignature" }, "description": "Signatures holds all signatures of the image." + }, + "dockerImageSignatures": { + "type": "array", + "items": { + "$ref": "v1.Image.dockerImageSignatures" + }, + "description": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1." + }, + "dockerImageManifestMediaType": { + "type": "string", + "description": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2." + }, + "dockerImageConfig": { + "type": "string", + "description": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2." } } }, @@ -22203,7 +22218,8 @@ "description": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.", "required": [ "name", - "size" + "size", + "mediaType" ], "properties": { "name": { @@ -22214,6 +22230,10 @@ "type": "integer", "format": "int64", "description": "Size of the layer in bytes as defined by the underlying store." + }, + "mediaType": { + "type": "string", + "description": "MediaType of the referenced object." 
} } }, @@ -22334,6 +22354,10 @@ } } }, + "v1.Image.dockerImageSignatures": { + "id": "v1.Image.dockerImageSignatures", + "properties": {} + }, "v1.ImageStreamImage": { "id": "v1.ImageStreamImage", "description": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream.", diff --git a/hack/build-images.sh b/hack/build-images.sh index ea2f6cef5ee6..c09f0ef854b6 100755 --- a/hack/build-images.sh +++ b/hack/build-images.sh @@ -30,7 +30,7 @@ if [[ "${OS_RELEASE:-}" == "n" ]]; then echo "Building images from source ${OS_RELEASE_COMMIT}:" echo - os::build::build_static_binaries "${OS_IMAGE_COMPILE_TARGETS[@]-}" "${OS_SCRATCH_IMAGE_COMPILE_TARGETS[@]-}" + OS_GOFLAGS="${OS_GOFLAGS:-} ${OS_IMAGE_COMPILE_GOFLAGS}" os::build::build_static_binaries "${OS_IMAGE_COMPILE_TARGETS[@]-}" "${OS_SCRATCH_IMAGE_COMPILE_TARGETS[@]-}" os::build::place_bins "${OS_IMAGE_COMPILE_BINARIES[@]}" echo else diff --git a/images/dockerregistry/config.yml b/images/dockerregistry/config.yml index c06f700dcd35..90b3dfdc8174 100644 --- a/images/dockerregistry/config.yml +++ b/images/dockerregistry/config.yml @@ -14,9 +14,14 @@ auth: openshift: realm: openshift middleware: + registry: + - name: openshift repository: - name: openshift options: + acceptschema2: false pullthrough: true enforcequota: false projectcachettl: 1m + storage: + - name: openshift diff --git a/pkg/api/v1beta3/deep_copy_generated.go b/pkg/api/v1beta3/deep_copy_generated.go index 7ffed147bdac..5725f5f222df 100644 --- a/pkg/api/v1beta3/deep_copy_generated.go +++ b/pkg/api/v1beta3/deep_copy_generated.go @@ -1959,12 +1959,15 @@ func deepCopy_v1beta3_Image(in imageapiv1beta3.Image, out *imageapiv1beta3.Image } else { out.DockerImageLayers = nil } + out.DockerImageManifestMediaType = in.DockerImageManifestMediaType + out.DockerImageConfig = in.DockerImageConfig return nil } func deepCopy_v1beta3_ImageLayer(in imageapiv1beta3.ImageLayer, out *imageapiv1beta3.ImageLayer, c *conversion.Cloner) error { out.Name = in.Name out.Size = in.Size + out.MediaType = in.MediaType return nil } diff --git a/pkg/cmd/dockerregistry/dockerregistry.go b/pkg/cmd/dockerregistry/dockerregistry.go index 7be4ad5da045..fc24424c4eef 100644 --- a/pkg/cmd/dockerregistry/dockerregistry.go +++ b/pkg/cmd/dockerregistry/dockerregistry.go @@ -32,8 +32,7 @@ import ( _ "github.com/docker/distribution/registry/storage/driver/gcs" _ "github.com/docker/distribution/registry/storage/driver/inmemory" _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" - _ "github.com/docker/distribution/registry/storage/driver/oss" - _ "github.com/docker/distribution/registry/storage/driver/s3" + _ "github.com/docker/distribution/registry/storage/driver/s3-aws" _ "github.com/docker/distribution/registry/storage/driver/swift" "github.com/openshift/origin/pkg/cmd/server/crypto" diff --git a/pkg/cmd/server/bootstrappolicy/policy.go b/pkg/cmd/server/bootstrappolicy/policy.go index 3c0d6429212b..415926d33b6f 100644 --- a/pkg/cmd/server/bootstrappolicy/policy.go +++ b/pkg/cmd/server/bootstrappolicy/policy.go @@ -437,8 +437,8 @@ func GetBootstrapClusterRoles() []authorizationapi.ClusterRole { Rules: []authorizationapi.PolicyRule{ authorizationapi.NewRule("list").Groups(kapiGroup).Resources("limitranges", "resourcequotas").RuleOrDie(), - authorizationapi.NewRule("get", "delete").Groups(imageGroup).Resources("images").RuleOrDie(), - authorizationapi.NewRule("get").Groups(imageGroup).Resources("imagestreamimages", "imagestreamtags", "imagestreams/secrets").RuleOrDie(), 
+ authorizationapi.NewRule("get", "delete").Groups(imageGroup).Resources("images", "imagestreamtags").RuleOrDie(), + authorizationapi.NewRule("get").Groups(imageGroup).Resources("imagestreamimages", "imagestreams/secrets").RuleOrDie(), authorizationapi.NewRule("get", "update").Groups(imageGroup).Resources("imagestreams").RuleOrDie(), authorizationapi.NewRule("create").Groups(imageGroup).Resources("imagestreammappings").RuleOrDie(), }, diff --git a/pkg/dockerregistry/server/admin.go b/pkg/dockerregistry/server/admin.go index aadf88550aa2..833519c1523e 100644 --- a/pkg/dockerregistry/server/admin.go +++ b/pkg/dockerregistry/server/admin.go @@ -48,13 +48,9 @@ func (bh *blobHandler) Delete(w http.ResponseWriter, req *http.Request) { return } - bd, err := storage.RegistryBlobDeleter(bh.Namespace()) - if err != nil { - bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - return - } + vacuum := storage.NewVacuum(bh.Context, dockerStorageDriver) - err = bd.Delete(bh, bh.Digest) + err := vacuum.RemoveBlob(bh.Digest.String()) if err != nil { // ignore not found error switch t := err.(type) { diff --git a/pkg/dockerregistry/server/blobdescriptorservice.go b/pkg/dockerregistry/server/blobdescriptorservice.go new file mode 100644 index 000000000000..7ace8d86fb11 --- /dev/null +++ b/pkg/dockerregistry/server/blobdescriptorservice.go @@ -0,0 +1,30 @@ +package server + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/middleware/registry" + "github.com/docker/distribution/registry/storage" +) + +func init() { + middleware.RegisterOptions(storage.BlobDescriptorServiceFactory(&blobDescriptorServiceFactory{})) +} + +// blobDescriptorServiceFactory needs to be able to work with blobs +// directly without using links. This allows us to ignore the distribution +// of blobs between repositories. +type blobDescriptorServiceFactory struct{} + +func (bf *blobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &blobDescriptorService{svc} +} + +type blobDescriptorService struct { + distribution.BlobDescriptorService +} + +func (bs *blobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return dockerRegistry.BlobStatter().Stat(ctx, dgst) +} diff --git a/pkg/dockerregistry/server/quotarestrictedblobstore.go b/pkg/dockerregistry/server/quotarestrictedblobstore.go index 08af61c7d9b1..5173e3259d8c 100644 --- a/pkg/dockerregistry/server/quotarestrictedblobstore.go +++ b/pkg/dockerregistry/server/quotarestrictedblobstore.go @@ -112,10 +112,10 @@ type quotaRestrictedBlobStore struct { var _ distribution.BlobStore = "aRestrictedBlobStore{} // Create wraps returned blobWriter with quota guard wrapper. -func (bs *quotaRestrictedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { +func (bs *quotaRestrictedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*quotaRestrictedBlobStore).Create: starting") - bw, err := bs.BlobStore.Create(ctx) + bw, err := bs.BlobStore.Create(ctx, options...) 
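The `quotaRestrictedBlobStore` change above tracks a distribution API change: `BlobStore.Create` now takes variadic `BlobCreateOption`s, and a wrapper has to forward them untouched or the middleware silently drops caller options. A stripped-down decorator illustrating the forwarding; this is a fragment under the assumption of the vendored v2.4 interfaces, not the real wrapper, and the embedded store supplies every other `BlobStore` method:

```go
package server

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// loggingBlobStore is a hypothetical wrapper; it only intercepts Create and
// must pass the BlobCreateOptions through to the wrapped store.
type loggingBlobStore struct {
	distribution.BlobStore
}

func (bs *loggingBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
	context.GetLogger(ctx).Debug("(*loggingBlobStore).Create: starting")
	return bs.BlobStore.Create(ctx, options...)
}
```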
if err != nil { return nil, err } diff --git a/pkg/dockerregistry/server/registry.go b/pkg/dockerregistry/server/registry.go new file mode 100644 index 000000000000..7cf4c8c84f9a --- /dev/null +++ b/pkg/dockerregistry/server/registry.go @@ -0,0 +1,23 @@ +package server + +import ( + log "github.com/Sirupsen/logrus" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/middleware/registry" +) + +// dockerRegistry represents a collection of repositories, addressable by name. +// This variable holds the object created by the docker/distribution. We import +// it into our namespace because there are no other ways to access it. In other +// cases it is hidden from us. +var dockerRegistry distribution.Namespace + +func init() { + middleware.Register("openshift", func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) { + log.Info("OpenShift registry middleware initializing") + dockerRegistry = registry + return dockerRegistry, nil + }) +} diff --git a/pkg/dockerregistry/server/repositorymiddleware.go b/pkg/dockerregistry/server/repositorymiddleware.go index a8067dd8392c..2c2ac720ae89 100644 --- a/pkg/dockerregistry/server/repositorymiddleware.go +++ b/pkg/dockerregistry/server/repositorymiddleware.go @@ -11,6 +11,8 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + regapi "github.com/docker/distribution/registry/api/v2" repomw "github.com/docker/distribution/registry/middleware/repository" "github.com/docker/libtrust" @@ -40,6 +42,10 @@ const ( // objects. It takes a valid time duration string (e.g. "2m"). If empty, you get the default timeout. If // zero (e.g. "0m"), caching is disabled. ProjectCacheTTLEnvVar = "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_PROJECTCACHETTL" + + // AcceptSchema2EnvVar is a boolean environment variable that allows to accept manifest schema v2 + // on manifest put requests. + AcceptSchema2EnvVar = "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ACCEPTSCHEMA2" ) var ( @@ -68,9 +74,17 @@ func init() { // DefaultRegistryClient before starting a registry. repomw.Register("openshift", func(ctx context.Context, repo distribution.Repository, options map[string]interface{}) (distribution.Repository, error) { - registryOSClient, kClient, err := DefaultRegistryClient.Clients() - if err != nil { - return nil, err + if dockerRegistry == nil { + panic(fmt.Sprintf("Configuration error: OpenShift registry middleware not activated")) + } + + if dockerStorageDriver == nil { + panic(fmt.Sprintf("Configuration error: OpenShift storage driver middleware not activated")) + } + + registryOSClient, kClient, errClients := DefaultRegistryClient.Clients() + if errClients != nil { + return nil, errClients } if quotaEnforcing == nil { quotaEnforcing = newQuotaEnforcingConfig(ctx, os.Getenv(EnforceQuotaEnvVar), os.Getenv(ProjectCacheTTLEnvVar), options) @@ -102,6 +116,8 @@ type repository struct { // if true, the repository will check remote references in the image stream to support pulling "through" // from a remote repository pullthrough bool + // acceptschema2 allows to refuse the manifest schema version 2 + acceptschema2 bool // cachedLayers remembers a mapping of layer digest to repositories recently seen with that image to avoid // having to check every potential upstream repository when a blob request is made. 
The cache is useful only // when session affinity is on for the registry, but in practice the first pull will fill the cache. @@ -124,16 +140,19 @@ func newRepositoryWithClient( return nil, fmt.Errorf("%s is required", DockerRegistryURLEnvVar) } - pullthrough := false - if value, ok := options["pullthrough"]; ok { - if b, ok := value.(bool); ok { - pullthrough = b - } + pullthrough := getBoolOption("pullthrough", false, options) + + acceptschema2 := false + + if os.Getenv(AcceptSchema2EnvVar) != "" { + acceptschema2 = os.Getenv(AcceptSchema2EnvVar) == "true" + } else { + acceptschema2 = getBoolOption("acceptschema2", false, options) } - nameParts := strings.SplitN(repo.Name(), "/", 2) + nameParts := strings.SplitN(repo.Named().Name(), "/", 2) if len(nameParts) != 2 { - return nil, fmt.Errorf("invalid repository name %q: it must be of the format <project>/<name>", repo.Name()) + return nil, fmt.Errorf("invalid repository name %q: it must be of the format <project>/<name>", repo.Named().Name()) } return &repository{ @@ -147,10 +166,21 @@ func newRepositoryWithClient( namespace: nameParts[0], name: nameParts[1], pullthrough: pullthrough, + acceptschema2: acceptschema2, cachedLayers: cachedLayers, }, nil } +func getBoolOption(name string, defval bool, options map[string]interface{}) bool { + if value, ok := options[name]; ok { + var b bool + if b, ok = value.(bool); ok { + return b + } + } + return defval +} + // Manifests returns r, which implements distribution.ManifestService. func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { if r.ctx == ctx { @@ -186,25 +216,18 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { } return bs - } -// Tags lists the tags under the named repository. -func (r *repository) Tags() ([]string, error) { - imageStream, err := r.getImageStream() - if err != nil { - return []string{}, nil +// Tags returns a reference to this repository tag service. +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tagService{ + TagService: r.Repository.Tags(ctx), + repo: r, } - tags := []string{} - for tag := range imageStream.Status.Tags { - tags = append(tags, tag) - } - - return tags, nil } // Exists returns true if the manifest specified by dgst exists. -func (r *repository) Exists(dgst digest.Digest) (bool, error) { +func (r *repository) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { image, err := r.getImage(dgst) if err != nil { return false, err @@ -212,18 +235,8 @@ func (r *repository) Exists(dgst digest.Digest) (bool, error) { return image != nil, nil } -// ExistsByTag returns true if the manifest with tag `tag` exists. -func (r *repository) ExistsByTag(tag string) (bool, error) { - imageStream, err := r.getImageStream() - if err != nil { - return false, err - } - _, found := imageStream.Status.Tags[tag] - return found, nil -} - // Get retrieves the manifest with digest `dgst`.
-func (r *repository) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { +func (r *repository) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { if _, err := r.getImageStreamImage(dgst); err != nil { context.GetLogger(r.ctx).Errorf("error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err) return nil, err @@ -236,137 +249,40 @@ func (r *repository) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { } ref := imageapi.DockerImageReference{Namespace: r.namespace, Name: r.name, Registry: r.registryAddr} - return r.manifestFromImageWithCachedLayers(image, ref.DockerClientDefaults().Exact()) -} + manifest, err := r.manifestFromImageWithCachedLayers(image, ref.DockerClientDefaults().Exact()) -// Enumerate retrieves digests of manifest revisions in particular repository -func (r *repository) Enumerate() ([]digest.Digest, error) { - panic("not implemented") + return manifest, err } -// GetByTag retrieves the named manifest with the provided tag -func (r *repository) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - for _, opt := range options { - if err := opt(r); err != nil { - return nil, err - } - } - - // find the image mapped to this tag - imageStreamTag, err := r.getImageStreamTag(tag) - if err != nil { - // TODO: typed errors - context.GetLogger(r.ctx).Errorf("error getting ImageStreamTag %q: %v", tag, err) - return nil, err - } - image := &imageStreamTag.Image - - ref, referenceErr := imageapi.ParseDockerImageReference(image.DockerImageReference) - if referenceErr == nil { - ref.Namespace = r.namespace - ref.Name = r.name - ref.Registry = r.registryAddr - } - defaultRef := ref.DockerClientDefaults() - cacheName := defaultRef.AsRepository().Exact() - - // if we have a local manifest, use it - if len(image.DockerImageManifest) > 0 { - return r.manifestFromImageWithCachedLayers(image, cacheName) - } - - dgst, err := digest.ParseDigest(imageStreamTag.Image.Name) - if err != nil { - context.GetLogger(r.ctx).Errorf("error parsing digest %q: %v", imageStreamTag.Image.Name, err) - return nil, err - } - - if localImage, err := r.getImage(dgst); err != nil { - // if the image is managed by OpenShift and we cannot load the image, report an error - if image.Annotations[imageapi.ManagedByOpenShiftAnnotation] == "true" { - context.GetLogger(r.ctx).Errorf("error getting image %q: %v", dgst.String(), err) - return nil, err - } - } else { - // if we have a local manifest, use it - if len(localImage.DockerImageManifest) > 0 { - return r.manifestFromImageWithCachedLayers(localImage, cacheName) - } - } - - // allow pullthrough to be disabled - if !r.pullthrough { - return nil, distribution.ErrManifestBlobUnknown{Digest: dgst} - } - - // check the previous error here - if referenceErr != nil { - context.GetLogger(r.ctx).Errorf("error parsing image %q: %v", image.DockerImageReference, referenceErr) - return nil, referenceErr - } - - return r.pullthroughGetByTag(image, ref, cacheName, options...) -} - -// pullthroughGetByTag attempts to load the given image manifest from the remote server defined by ref, using cacheName to store any cached layers. 
-func (r *repository) pullthroughGetByTag(image *imageapi.Image, ref imageapi.DockerImageReference, cacheName string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - defaultRef := ref.DockerClientDefaults() - - retriever := r.importContext() +// Put creates or updates the named manifest. +func (r *repository) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + var canonical []byte - repo, err := retriever.Repository(r.ctx, defaultRef.RegistryURL(), defaultRef.RepositoryName(), false) + // Resolve the payload in the manifest. + mediatype, payload, err := manifest.Payload() if err != nil { - context.GetLogger(r.ctx).Errorf("error getting remote repository for image %q: %v", image.DockerImageReference, err) - return nil, err + return "", err } - // get a manifest context - manifests, err := repo.Manifests(r.ctx) - if err != nil { - context.GetLogger(r.ctx).Errorf("error getting manifests for image %q: %v", image.DockerImageReference, err) - return nil, err + switch manifest.(type) { + case *schema1.SignedManifest: + canonical = manifest.(*schema1.SignedManifest).Canonical + case *schema2.DeserializedManifest: + canonical = payload + default: + err = fmt.Errorf("unrecognized manifest type %T", manifest) + return "", regapi.ErrorCodeManifestInvalid.WithDetail(err) } - // fetch this by image - if len(ref.ID) > 0 { - dgst, err := digest.ParseDigest(ref.ID) - if err != nil { - context.GetLogger(r.ctx).Errorf("error getting manifests for image %q: %v", image.DockerImageReference, err) - return nil, err + if !r.acceptschema2 { + if _, ok := manifest.(*schema1.SignedManifest); !ok { + err = fmt.Errorf("schema version 2 disabled") + return "", regapi.ErrorCodeManifestInvalid.WithDetail(err) } - manifest, err := manifests.Get(dgst) - if err != nil { - context.GetLogger(r.ctx).Errorf("error getting manifest from remote server for image %q: %v", image.DockerImageReference, err) - return nil, err - } - r.rememberLayers(manifest, cacheName) - return manifest, nil - } - - // fetch this by tag - manifest, err := manifests.GetByTag(ref.Tag, options...) - if err != nil { - context.GetLogger(r.ctx).Errorf("error getting manifest from remote server for image %q: %v", image.DockerImageReference, err) - return nil, err - } - - r.rememberLayers(manifest, cacheName) - return manifest, nil -} - -// Put creates or updates the named manifest. -func (r *repository) Put(manifest *schema1.SignedManifest) error { - // Resolve the payload in the manifest. 
- payload, err := manifest.Payload() - if err != nil { - return err } // Calculate digest - dgst, err := digest.FromBytes(payload) - if err != nil { - return err - } + dgst := digest.FromBytes(canonical) // Upload to openshift ism := imageapi.ImageStreamMapping{ @@ -374,7 +290,6 @@ func (r *repository) Put(manifest *schema1.SignedManifest) error { Namespace: r.namespace, Name: r.name, }, - Tag: manifest.Tag, Image: imageapi.Image{ ObjectMeta: kapi.ObjectMeta{ Name: dgst.String(), @@ -382,32 +297,40 @@ func (r *repository) Put(manifest *schema1.SignedManifest) error { imageapi.ManagedByOpenShiftAnnotation: "true", }, }, - DockerImageReference: fmt.Sprintf("%s/%s/%s@%s", r.registryAddr, r.namespace, r.name, dgst.String()), - DockerImageManifest: string(manifest.Raw), + DockerImageReference: fmt.Sprintf("%s/%s/%s@%s", r.registryAddr, r.namespace, r.name, dgst.String()), + DockerImageManifest: string(payload), + DockerImageManifestMediaType: mediatype, }, } - if err := r.fillImageWithMetadata(manifest, &ism.Image); err != nil { - return err + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + ism.Tag = opt.Tag + break + } + } + + if err = r.fillImageWithMetadata(manifest, &ism.Image); err != nil { + return "", err } - if err := r.registryOSClient.ImageStreamMappings(r.namespace).Create(&ism); err != nil { + if err = r.registryOSClient.ImageStreamMappings(r.namespace).Create(&ism); err != nil { // if the error was that the image stream wasn't found, try to auto provision it statusErr, ok := err.(*kerrors.StatusError) if !ok { context.GetLogger(r.ctx).Errorf("error creating ImageStreamMapping: %s", err) - return err + return "", err } if quotautil.IsErrorQuotaExceeded(statusErr) { context.GetLogger(r.ctx).Errorf("denied creating ImageStreamMapping: %v", statusErr) - return distribution.ErrAccessDenied + return "", distribution.ErrAccessDenied } status := statusErr.ErrStatus if status.Code != http.StatusNotFound || (strings.ToLower(status.Details.Kind) != "imagestream" /*pre-1.2*/ && strings.ToLower(status.Details.Kind) != "imagestreams") || status.Details.Name != r.name { context.GetLogger(r.ctx).Errorf("error creating ImageStreamMapping: %s", err) - return err + return "", err } stream := imageapi.ImageStream{ @@ -416,78 +339,110 @@ func (r *repository) Put(manifest *schema1.SignedManifest) error { }, } - client, ok := UserClientFrom(r.ctx) + uclient, ok := UserClientFrom(r.ctx) if !ok { context.GetLogger(r.ctx).Errorf("error creating user client to auto provision image stream: Origin user client unavailable") - return statusErr + return "", statusErr } - if _, err := client.ImageStreams(r.namespace).Create(&stream); err != nil { + if _, err := uclient.ImageStreams(r.namespace).Create(&stream); err != nil { if quotautil.IsErrorQuotaExceeded(err) { context.GetLogger(r.ctx).Errorf("denied creating ImageStream: %v", err) - return distribution.ErrAccessDenied + return "", distribution.ErrAccessDenied } context.GetLogger(r.ctx).Errorf("error auto provisioning ImageStream: %s", err) - return statusErr + return "", statusErr } // try to create the ISM again if err := r.registryOSClient.ImageStreamMappings(r.namespace).Create(&ism); err != nil { if quotautil.IsErrorQuotaExceeded(err) { context.GetLogger(r.ctx).Errorf("denied a creation of ImageStreamMapping: %v", err) - return distribution.ErrAccessDenied + return "", distribution.ErrAccessDenied } context.GetLogger(r.ctx).Errorf("error creating ImageStreamMapping: %s", err) - return err + return "", err } } - 
// Grab each json signature and store them. - signatures, err := manifest.Signatures() - if err != nil { - return err - } + return dgst, nil +} - for _, signature := range signatures { - if err := r.Signatures().Put(dgst, signature); err != nil { - context.GetLogger(r.ctx).Errorf("error storing signature: %s", err) - return err - } +// fillImageWithMetadata fills a given image with metadata. +func (r *repository) fillImageWithMetadata(manifest distribution.Manifest, image *imageapi.Image) error { + if deserializedManifest, ok := manifest.(*schema2.DeserializedManifest); ok { + r.deserializedManifestFillImageMetadata(deserializedManifest, image) + } else if signedManifest, ok := manifest.(*schema1.SignedManifest); ok { + r.signedManifestFillImageMetadata(signedManifest, image) + } else { + return fmt.Errorf("unrecognized manifest type %T", manifest) } + context.GetLogger(r.ctx).Infof("total size of image %s with docker ref %s: %d", image.Name, image.DockerImageReference, image.DockerImageMetadata.Size) return nil } -// fillImageWithMetadata fills a given image with metadata. Also correct layer sizes with blob sizes. Newer +// signedManifestFillImageMetadata fills a given image with metadata. It also corrects layer sizes with blob sizes. Newer // Docker client versions don't set layer sizes in the manifest at all. Origin master needs correct layer // sizes for proper image quota support. That's why we need to fill the metadata in the registry. -func (r *repository) fillImageWithMetadata(manifest *schema1.SignedManifest, image *imageapi.Image) error { +func (r *repository) signedManifestFillImageMetadata(manifest *schema1.SignedManifest, image *imageapi.Image) error { + signatures, err := manifest.Signatures() + if err != nil { + return err + } + + for _, signDigest := range signatures { + image.DockerImageSignatures = append(image.DockerImageSignatures, signDigest) + } + if err := imageapi.ImageWithMetadata(image); err != nil { return err } + refs := manifest.References() + layerSet := sets.NewString() - size := int64(0) + image.DockerImageMetadata.Size = int64(0) blobs := r.Blobs(r.ctx) for i := range image.DockerImageLayers { layer := &image.DockerImageLayers[i] // DockerImageLayers represents manifest.Manifest.FSLayers in reversed order - desc, err := blobs.Stat(r.ctx, manifest.Manifest.FSLayers[len(image.DockerImageLayers)-i-1].BlobSum) + desc, err := blobs.Stat(r.ctx, refs[len(image.DockerImageLayers)-i-1].Digest) if err != nil { context.GetLogger(r.ctx).Errorf("failed to stat blobs %s of image %s", layer.Name, image.DockerImageReference) return err } + if layer.MediaType == "" { + if desc.MediaType != "" { + layer.MediaType = desc.MediaType + } else { + layer.MediaType = schema1.MediaTypeManifestLayer + } + } layer.LayerSize = desc.Size // count empty layer just once (empty layer may actually have non-zero size) if !layerSet.Has(layer.Name) { - size += desc.Size + image.DockerImageMetadata.Size += desc.Size layerSet.Insert(layer.Name) } } - image.DockerImageMetadata.Size = size - context.GetLogger(r.ctx).Infof("total size of image %s with docker ref %s: %d", image.Name, image.DockerImageReference, size) + return nil +} + +// deserializedManifestFillImageMetadata fills a given image with metadata. 
+func (r *repository) deserializedManifestFillImageMetadata(manifest *schema2.DeserializedManifest, image *imageapi.Image) error { + configBytes, err := r.Blobs(r.ctx).Get(r.ctx, manifest.Config.Digest) + if err != nil { + context.GetLogger(r.ctx).Errorf("failed to get image config %s: %v", manifest.Config.Digest.String(), err) + return err + } + image.DockerImageConfig = string(configBytes) + + if err := imageapi.ImageWithMetadata(image); err != nil { + return err + } return nil } @@ -495,12 +450,12 @@ func (r *repository) fillImageWithMetadata(manifest *schema1.SignedManifest, ima // Delete deletes the manifest with digest `dgst`. Note: Image resources // in OpenShift are deleted via 'oadm prune images'. This function deletes // the content related to the manifest in the registry's storage (signatures). -func (r *repository) Delete(dgst digest.Digest) error { +func (r *repository) Delete(ctx context.Context, dgst digest.Digest) error { ms, err := r.Repository.Manifests(r.ctx) if err != nil { return err } - return ms.Delete(dgst) + return ms.Delete(ctx, dgst) } // importContext loads secrets for this image stream and returns a context for getting distribution @@ -508,7 +463,7 @@ func (r *repository) Delete(dgst digest.Digest) error { func (r *repository) importContext() importer.RepositoryRetriever { secrets, err := r.registryOSClient.ImageStreamSecrets(r.namespace).Secrets(r.name, kapi.ListOptions{}) if err != nil { - context.GetLogger(r.ctx).Errorf("error getting secrets for repository %q: %v", r.Name(), err) + context.GetLogger(r.ctx).Errorf("error getting secrets for repository %q: %v", r.Named().Name(), err) secrets = &kapi.SecretList{} } credentials := importer.NewCredentialsForSecrets(secrets.Items) @@ -525,12 +480,6 @@ func (r *repository) getImage(dgst digest.Digest) (*imageapi.Image, error) { return r.registryOSClient.Images().Get(dgst.String()) } -// getImageStreamTag retrieves the Image with tag `tag` for the ImageStream -// associated with r. -func (r *repository) getImageStreamTag(tag string) (*imageapi.ImageStreamTag, error) { - return r.registryOSClient.ImageStreamTags(r.namespace).Get(r.name, tag) -} - // getImageStreamImage retrieves the Image with digest `dgst` for the ImageStream // associated with r. This ensures the image belongs to the image stream. 
func (r *repository) getImageStreamImage(dgst digest.Digest) (*imageapi.ImageStreamImage, error) { @@ -538,50 +487,71 @@ func (r *repository) getImageStreamImage(dgst digest.Digest) (*imageapi.ImageStr } // rememberLayers caches the provided layers -func (r *repository) rememberLayers(manifest *schema1.SignedManifest, cacheName string) { +func (r *repository) rememberLayers(manifest distribution.Manifest, cacheName string) { if !r.pullthrough { return } // remember the layers in the cache as an optimization to avoid searching all remote repositories - for _, layer := range manifest.FSLayers { - r.cachedLayers.RememberDigest(layer.BlobSum, cacheName) + for _, layer := range manifest.References() { + r.cachedLayers.RememberDigest(layer.Digest, cacheName) } } // manifestFromImageWithCachedLayers loads the image and then caches any located layers -func (r *repository) manifestFromImageWithCachedLayers(image *imageapi.Image, cacheName string) (*schema1.SignedManifest, error) { - manifest, err := r.manifestFromImage(image) +func (r *repository) manifestFromImageWithCachedLayers(image *imageapi.Image, cacheName string) (manifest distribution.Manifest, err error) { + if image.DockerImageManifestMediaType == schema2.MediaTypeManifest { + manifest, err = r.deserializedManifestFromImage(image) + } else { + manifest, err = r.signedManifestFromImage(image) + } + if err != nil { - return nil, err + return } + r.rememberLayers(manifest, cacheName) - return manifest, nil + return } -// manifestFromImage converts an Image to a SignedManifest. -func (r *repository) manifestFromImage(image *imageapi.Image) (*schema1.SignedManifest, error) { - dgst, err := digest.ParseDigest(image.Name) - if err != nil { - return nil, err +// signedManifestFromImage converts an Image to a SignedManifest. +func (r *repository) signedManifestFromImage(image *imageapi.Image) (*schema1.SignedManifest, error) { + if image.DockerImageManifestMediaType == schema2.MediaTypeManifest { + context.GetLogger(r.ctx).Errorf("old client pulling new image %s", image.DockerImageReference) + return nil, fmt.Errorf("unable to convert new image to old one") } raw := []byte(image.DockerImageManifest) - // prefer signatures from the manifest if _, err := libtrust.ParsePrettySignature(raw, "signatures"); err == nil { - sm := schema1.SignedManifest{Raw: raw} - if err := json.Unmarshal(raw, &sm); err == nil { + sm := schema1.SignedManifest{Canonical: raw} + if err = json.Unmarshal(raw, &sm); err == nil { return &sm, nil } } - // Fetch the signatures for the manifest - signatures, err := r.Signatures().Get(dgst) + dgst, err := digest.ParseDigest(image.Name) if err != nil { return nil, err } - jsig, err := libtrust.NewJSONSignature(raw, signatures...) + var signBytes [][]byte + if len(image.DockerImageSignatures) == 0 { + // Fetch the signatures for the manifest + signatures, errSign := r.getSignatures(dgst) + if errSign != nil { + return nil, errSign + } + + for _, signatureDigest := range signatures { + signBytes = append(signBytes, []byte(signatureDigest)) + } + } else { + for _, sign := range image.DockerImageSignatures { + signBytes = append(signBytes, sign) + } + } + + jsig, err := libtrust.NewJSONSignature(raw, signBytes...) 
if err != nil { return nil, err } @@ -593,8 +563,39 @@ func (r *repository) manifestFromImage(image *imageapi.Image) (*schema1.SignedMa } var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { + if err = json.Unmarshal(raw, &sm); err != nil { return nil, err } return &sm, err } + +func (r *repository) getSignatures(dgst digest.Digest) ([]digest.Digest, error) { + // We can not use the r.repository here. docker/distribution wraps all the methods that + // write or read blobs. It is made for notifications service. We need to get a real + // repository without any wrappers. + repository, err := dockerRegistry.Repository(r.ctx, r.Named()) + if err != nil { + return nil, err + } + + manifestService, err := repository.Manifests(r.ctx) + if err != nil { + return nil, err + } + + signaturesGetter, ok := manifestService.(distribution.SignaturesGetter) + if !ok { + return nil, fmt.Errorf("unable to convert ManifestService into SignaturesGetter") + } + + return signaturesGetter.GetSignatures(r.ctx, dgst) +} + +// deserializedManifestFromImage converts an Image to a DeserializedManifest. +func (r *repository) deserializedManifestFromImage(image *imageapi.Image) (*schema2.DeserializedManifest, error) { + var manifest schema2.DeserializedManifest + if err := json.Unmarshal([]byte(image.DockerImageManifest), &manifest); err != nil { + return nil, err + } + return &manifest, nil +} diff --git a/pkg/dockerregistry/server/storagedriver.go b/pkg/dockerregistry/server/storagedriver.go new file mode 100644 index 000000000000..a1f3cf1a0559 --- /dev/null +++ b/pkg/dockerregistry/server/storagedriver.go @@ -0,0 +1,26 @@ +package server + +import ( + log "github.com/Sirupsen/logrus" + + storagedriver "github.com/docker/distribution/registry/storage/driver" + registrystorage "github.com/docker/distribution/registry/storage/driver/middleware" +) + +// dockerStorageDriver gives access to the blob store. +// This variable holds the object created by the docker/distribution. We import +// it into our namespace because there are no other ways to access it. In other +// cases it is hidden from us. +var dockerStorageDriver storagedriver.StorageDriver + +func init() { + registrystorage.Register("openshift", func(driver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { + log.Info("OpenShift middleware for storage driver initializing") + + // We can do this because of an initialization sequence of middlewares. + // Storage driver is required to create registry. So we can be sure that + // this assignment will happen before registry and repository initialization. 
+ dockerStorageDriver = driver + return dockerStorageDriver, nil + }) +} diff --git a/pkg/dockerregistry/server/tagservice.go b/pkg/dockerregistry/server/tagservice.go new file mode 100644 index 000000000000..fb561754d2d1 --- /dev/null +++ b/pkg/dockerregistry/server/tagservice.go @@ -0,0 +1,219 @@ +package server + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + kapi "k8s.io/kubernetes/pkg/api" + + imageapi "github.com/openshift/origin/pkg/image/api" + quotautil "github.com/openshift/origin/pkg/quota/util" +) + +type tagService struct { + distribution.TagService + + repo *repository +} + +func (t tagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + imageStream, err := t.repo.getImageStream() + if err != nil { + context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err) + return distribution.Descriptor{}, distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()} + } + + te := imageapi.LatestTaggedImage(imageStream, tag) + if te == nil { + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} + } + dgst, err := digest.ParseDigest(te.Image) + if err != nil { + return distribution.Descriptor{}, err + } + + if !t.repo.pullthrough { + image, err := t.repo.getImage(dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + if !isImageManaged(image) { + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} + } + } + + return distribution.Descriptor{Digest: dgst}, nil +} + +func (t tagService) All(ctx context.Context) ([]string, error) { + tags := []string{} + + imageStream, err := t.repo.getImageStream() + if err != nil { + context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err) + return tags, distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()} + } + + managedImages := make(map[string]bool) + + for tag, history := range imageStream.Status.Tags { + if len(history.Items) == 0 { + continue + } + + if t.repo.pullthrough { + tags = append(tags, tag) + continue + } + + managed, found := managedImages[history.Items[0].Image] + if !found { + dgst, err := digest.ParseDigest(history.Items[0].Image) + if err != nil { + context.GetLogger(ctx).Errorf("bad digest %s: %v", history.Items[0].Image, err) + continue + } + + image, err := t.repo.getImage(dgst) + if err != nil { + context.GetLogger(ctx).Errorf("unable to get image %s/%s %s: %v", t.repo.namespace, t.repo.name, dgst.String(), err) + continue + } + managed = isImageManaged(image) + managedImages[history.Items[0].Image] = managed + } + + if !managed { + continue + } + + tags = append(tags, tag) + } + return tags, nil +} + +func (t tagService) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + tags := []string{} + + imageStream, err := t.repo.getImageStream() + if err != nil { + context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err) + return tags, distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()} + } + + managedImages := make(map[string]bool) + + for tag, history := range imageStream.Status.Tags { + if len(history.Items) == 0 { + continue + } + + dgst, err := digest.ParseDigest(history.Items[0].Image) + if err != nil { + context.GetLogger(ctx).Errorf("bad digest %s: %v", history.Items[0].Image, err) + continue + } + + if dgst != desc.Digest { + continue + } + + if t.repo.pullthrough { 
+ tags = append(tags, tag) + continue + } + + managed, found := managedImages[history.Items[0].Image] + if !found { + image, err := t.repo.getImage(dgst) + if err != nil { + context.GetLogger(ctx).Errorf("unable to get image %s/%s %s: %v", t.repo.namespace, t.repo.name, dgst.String(), err) + continue + } + managed = isImageManaged(image) + managedImages[history.Items[0].Image] = managed + } + + if !managed { + continue + } + + tags = append(tags, tag) + } + + return tags, nil +} + +func (t tagService) Tag(ctx context.Context, tag string, dgst distribution.Descriptor) error { + imageStream, err := t.repo.getImageStream() + if err != nil { + context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err) + return distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()} + } + + image, err := t.repo.registryOSClient.Images().Get(dgst.Digest.String()) + if err != nil { + context.GetLogger(ctx).Errorf("unable to get image: %s", dgst.Digest.String()) + return err + } + image.SetResourceVersion("") + + if !t.repo.pullthrough && !isImageManaged(image) { + return distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()} + } + + ism := imageapi.ImageStreamMapping{ + ObjectMeta: kapi.ObjectMeta{ + Namespace: imageStream.Namespace, + Name: imageStream.Name, + }, + Tag: tag, + Image: *image, + } + + err = t.repo.registryOSClient.ImageStreamMappings(imageStream.Namespace).Create(&ism) + if quotautil.IsErrorQuotaExceeded(err) { + context.GetLogger(ctx).Errorf("denied creating ImageStreamMapping: %v", err) + return distribution.ErrAccessDenied + } + + return err +} + +func (t tagService) Untag(ctx context.Context, tag string) error { + imageStream, err := t.repo.getImageStream() + if err != nil { + context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err) + return distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()} + } + + te := imageapi.LatestTaggedImage(imageStream, tag) + if te == nil { + return distribution.ErrTagUnknown{Tag: tag} + } + + if !t.repo.pullthrough { + dgst, err := digest.ParseDigest(te.Image) + if err != nil { + return err + } + + image, err := t.repo.getImage(dgst) + if err != nil { + return err + } + + if !isImageManaged(image) { + return distribution.ErrTagUnknown{Tag: tag} + } + } + + return t.repo.registryOSClient.ImageStreamTags(imageStream.Namespace).Delete(imageStream.Name, tag) +} + +func isImageManaged(image *imageapi.Image) bool { + managed, ok := image.ObjectMeta.Annotations[imageapi.ManagedByOpenShiftAnnotation] + return ok && managed == "true" +} diff --git a/pkg/image/api/deep_copy_generated.go b/pkg/image/api/deep_copy_generated.go index ed55cbf62d9c..52f183cf7979 100644 --- a/pkg/image/api/deep_copy_generated.go +++ b/pkg/image/api/deep_copy_generated.go @@ -14,9 +14,12 @@ func init() { if err := api.Scheme.AddGeneratedDeepCopyFuncs( DeepCopy_api_Descriptor, DeepCopy_api_DockerConfig, + DeepCopy_api_DockerConfigHistory, + DeepCopy_api_DockerConfigRootFS, DeepCopy_api_DockerFSLayer, DeepCopy_api_DockerHistory, DeepCopy_api_DockerImage, + DeepCopy_api_DockerImageConfig, DeepCopy_api_DockerImageManifest, DeepCopy_api_DockerImageReference, DeepCopy_api_DockerV1CompatibilityImage, @@ -167,6 +170,29 @@ func DeepCopy_api_DockerConfig(in DockerConfig, out *DockerConfig, c *conversion return nil } +func DeepCopy_api_DockerConfigHistory(in DockerConfigHistory, out *DockerConfigHistory, c *conversion.Cloner) error { + if err := 
unversioned.DeepCopy_unversioned_Time(in.Created, &out.Created, c); err != nil { + return err + } + out.Author = in.Author + out.CreatedBy = in.CreatedBy + out.Comment = in.Comment + out.EmptyLayer = in.EmptyLayer + return nil +} + +func DeepCopy_api_DockerConfigRootFS(in DockerConfigRootFS, out *DockerConfigRootFS, c *conversion.Cloner) error { + out.Type = in.Type + if in.DiffIDs != nil { + in, out := in.DiffIDs, &out.DiffIDs + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.DiffIDs = nil + } + return nil +} + func DeepCopy_api_DockerFSLayer(in DockerFSLayer, out *DockerFSLayer, c *conversion.Cloner) error { out.DockerBlobSum = in.DockerBlobSum return nil @@ -207,6 +233,41 @@ func DeepCopy_api_DockerImage(in DockerImage, out *DockerImage, c *conversion.Cl return nil } +func DeepCopy_api_DockerImageConfig(in DockerImageConfig, out *DockerImageConfig, c *conversion.Cloner) error { + if err := DeepCopy_api_DockerImage(in.DockerImage, &out.DockerImage, c); err != nil { + return err + } + if in.RootFS != nil { + in, out := in.RootFS, &out.RootFS + *out = new(DockerConfigRootFS) + if err := DeepCopy_api_DockerConfigRootFS(*in, *out, c); err != nil { + return err + } + } else { + out.RootFS = nil + } + if in.History != nil { + in, out := in.History, &out.History + *out = make([]DockerConfigHistory, len(in)) + for i := range in { + if err := DeepCopy_api_DockerConfigHistory(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.History = nil + } + out.OSVersion = in.OSVersion + if in.OSFeatures != nil { + in, out := in.OSFeatures, &out.OSFeatures + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.OSFeatures = nil + } + return nil +} + func DeepCopy_api_DockerImageManifest(in DockerImageManifest, out *DockerImageManifest, c *conversion.Cloner) error { out.SchemaVersion = in.SchemaVersion out.MediaType = in.MediaType @@ -328,6 +389,21 @@ func DeepCopy_api_Image(in Image, out *Image, c *conversion.Cloner) error { } else { out.Signatures = nil } + if in.DockerImageSignatures != nil { + in, out := in.DockerImageSignatures, &out.DockerImageSignatures + *out = make([][]byte, len(in)) + for i := range in { + if newVal, err := c.DeepCopy(in[i]); err != nil { + return err + } else { + (*out)[i] = newVal.([]byte) + } + } + } else { + out.DockerImageSignatures = nil + } + out.DockerImageManifestMediaType = in.DockerImageManifestMediaType + out.DockerImageConfig = in.DockerImageConfig return nil } @@ -371,6 +447,7 @@ func DeepCopy_api_ImageImportStatus(in ImageImportStatus, out *ImageImportStatus func DeepCopy_api_ImageLayer(in ImageLayer, out *ImageLayer, c *conversion.Cloner) error { out.Name = in.Name out.LayerSize = in.LayerSize + out.MediaType = in.MediaType return nil } diff --git a/pkg/image/api/dockertypes.go b/pkg/image/api/dockertypes.go index cccca50d4d35..18405fe214e4 100644 --- a/pkg/image/api/dockertypes.go +++ b/pkg/image/api/dockertypes.go @@ -121,3 +121,27 @@ type DockerV1CompatibilityImage struct { type DockerV1CompatibilityImageSize struct { Size int64 `json:"size,omitempty"` } + +// DockerImageConfig stores the image configuration +type DockerImageConfig struct { + DockerImage `json:",inline"` + RootFS *DockerConfigRootFS `json:"rootfs,omitempty"` + History []DockerConfigHistory `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` +} + +// DockerConfigHistory stores build commands that were used to create an image +type DockerConfigHistory struct { + Created 
unversioned.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// DockerConfigRootFS describes images root filesystem +type DockerConfigRootFS struct { + Type string `json:"type"` + DiffIDs []string `json:"diff_ids,omitempty"` +} diff --git a/pkg/image/api/helper.go b/pkg/image/api/helper.go index 7e26efd2e2e9..ec17d98c988c 100644 --- a/pkg/image/api/helper.go +++ b/pkg/image/api/helper.go @@ -15,6 +15,7 @@ import ( "github.com/blang/semver" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" "github.com/golang/glog" ) @@ -382,14 +383,54 @@ func ManifestMatchesImage(image *Image, newManifest []byte) (bool, error) { if err != nil { return false, err } - sm := schema1.SignedManifest{Raw: newManifest} - raw, err := sm.Payload() + var canonical []byte + + switch image.DockerImageManifestMediaType { + case schema2.MediaTypeManifest: + var m schema2.DeserializedManifest + if err := json.Unmarshal(newManifest, &m); err != nil { + return false, err + } + _, canonical, err = m.Payload() + if err != nil { + return false, err + } + case schema1.MediaTypeManifest, "": + var m schema1.SignedManifest + if err := json.Unmarshal(newManifest, &m); err != nil { + return false, err + } + canonical = m.Canonical + default: + return false, fmt.Errorf("unsupported manifest mediatype: %s", image.DockerImageManifestMediaType) + } + if _, err := v.Write(canonical); err != nil { + return false, err + } + return v.Verified(), nil +} + +// ImageConfigMatchesImage returns true if the provided image config matches a digest +// stored in the manifest of the image. 
+func ImageConfigMatchesImage(image *Image, imageConfig []byte) (bool, error) { + if image.DockerImageManifestMediaType != schema2.MediaTypeManifest { + return false, nil + } + + var m schema2.DeserializedManifest + if err := json.Unmarshal([]byte(image.DockerImageManifest), &m); err != nil { + return false, err + } + + v, err := digest.NewDigestVerifier(m.Config.Digest) if err != nil { return false, err } - if _, err := v.Write(raw); err != nil { + + if _, err := v.Write(imageConfig); err != nil { return false, err } + return v.Verified(), nil } @@ -429,6 +470,7 @@ func ImageWithMetadata(image *Image) error { image.DockerImageLayers = make([]ImageLayer, len(manifest.FSLayers)) for i, layer := range manifest.FSLayers { + image.DockerImageLayers[i].MediaType = schema1.MediaTypeManifestLayer image.DockerImageLayers[i].Name = layer.DockerBlobSum } if len(manifest.History) == len(image.DockerImageLayers) { @@ -469,8 +511,41 @@ func ImageWithMetadata(image *Image) error { image.DockerImageMetadata.Size = v1Metadata.Size } case 2: - // TODO: need to prepare for this - return fmt.Errorf("unrecognized Docker image manifest schema %d for %q (%s)", manifest.SchemaVersion, image.Name, image.DockerImageReference) + config := DockerImageConfig{} + if err := json.Unmarshal([]byte(image.DockerImageConfig), &config); err != nil { + return err + } + + image.DockerImageLayers = make([]ImageLayer, len(manifest.Layers)) + for i, layer := range manifest.Layers { + image.DockerImageLayers[i].Name = layer.Digest + image.DockerImageLayers[i].LayerSize = layer.Size + image.DockerImageLayers[i].MediaType = layer.MediaType + } + // reverse order of the layers for v1 (lowest = 0, highest = i) + for i, j := 0, len(image.DockerImageLayers)-1; i < j; i, j = i+1, j-1 { + image.DockerImageLayers[i], image.DockerImageLayers[j] = image.DockerImageLayers[j], image.DockerImageLayers[i] + } + + image.DockerImageMetadata.ID = manifest.Config.Digest + image.DockerImageMetadata.Parent = config.Parent + image.DockerImageMetadata.Comment = config.Comment + image.DockerImageMetadata.Created = config.Created + image.DockerImageMetadata.Container = config.Container + image.DockerImageMetadata.ContainerConfig = config.ContainerConfig + image.DockerImageMetadata.DockerVersion = config.DockerVersion + image.DockerImageMetadata.Author = config.Author + image.DockerImageMetadata.Config = config.Config + image.DockerImageMetadata.Architecture = config.Architecture + image.DockerImageMetadata.Size = int64(len(image.DockerImageConfig)) + + if len(image.DockerImageLayers) > 0 { + for _, layer := range image.DockerImageLayers { + image.DockerImageMetadata.Size += layer.LayerSize + } + } else { + image.DockerImageMetadata.Size += config.Size + } default: return fmt.Errorf("unrecognized Docker image manifest schema %d for %q (%s)", manifest.SchemaVersion, image.Name, image.DockerImageReference) } diff --git a/pkg/image/api/helper_test.go b/pkg/image/api/helper_test.go index 49073d587675..b41e8cb9f9c4 100644 --- a/pkg/image/api/helper_test.go +++ b/pkg/image/api/helper_test.go @@ -671,11 +671,11 @@ func TestImageWithMetadata(t *testing.T) { }, DockerImageManifest: validImageWithManifestData().DockerImageManifest, DockerImageLayers: []ImageLayer{ - {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", LayerSize: 0}, - {Name: "tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd", LayerSize: 188097705}, - {Name: 
"tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171", LayerSize: 194533}, - {Name: "tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0", LayerSize: 1895}, - {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", LayerSize: 0}, + {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 0}, + {Name: "tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 188097705}, + {Name: "tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 194533}, + {Name: "tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 1895}, + {Name: "tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", LayerSize: 0}, }, DockerImageMetadata: DockerImage{ ID: "2d24f826cb16146e2016ff349a8a33ed5830f3b938d45c0f82943f4ab8c097e7", diff --git a/pkg/image/api/types.go b/pkg/image/api/types.go index 680692610c4e..adee86cb3c07 100644 --- a/pkg/image/api/types.go +++ b/pkg/image/api/types.go @@ -68,6 +68,12 @@ type Image struct { DockerImageLayers []ImageLayer // Signatures holds all signatures of the image. Signatures []ImageSignature + // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + DockerImageSignatures [][]byte + // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + DockerImageManifestMediaType string + // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + DockerImageConfig string } // ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. @@ -76,6 +82,8 @@ type ImageLayer struct { Name string // LayerSize of the layer as defined by the underlying store. LayerSize int64 + // MediaType of the referenced object. 
+ MediaType string } const ( diff --git a/pkg/image/api/v1/conversion.go b/pkg/image/api/v1/conversion.go index c1f7b3c19f93..7550974daa91 100644 --- a/pkg/image/api/v1/conversion.go +++ b/pkg/image/api/v1/conversion.go @@ -21,6 +21,8 @@ func Convert_api_Image_To_v1_Image(in *newer.Image, out *Image, s conversion.Sco out.DockerImageReference = in.DockerImageReference out.DockerImageManifest = in.DockerImageManifest + out.DockerImageManifestMediaType = in.DockerImageManifestMediaType + out.DockerImageConfig = in.DockerImageConfig gvString := in.DockerImageMetadataVersion if len(gvString) == 0 { @@ -44,6 +46,7 @@ func Convert_api_Image_To_v1_Image(in *newer.Image, out *Image, s conversion.Sco if in.DockerImageLayers != nil { out.DockerImageLayers = make([]ImageLayer, len(in.DockerImageLayers)) for i := range in.DockerImageLayers { + out.DockerImageLayers[i].MediaType = in.DockerImageLayers[i].MediaType out.DockerImageLayers[i].Name = in.DockerImageLayers[i].Name out.DockerImageLayers[i].LayerSize = in.DockerImageLayers[i].LayerSize } @@ -62,6 +65,15 @@ func Convert_api_Image_To_v1_Image(in *newer.Image, out *Image, s conversion.Sco out.Signatures = nil } + if in.DockerImageSignatures != nil { + out.DockerImageSignatures = nil + for _, v := range in.DockerImageSignatures { + out.DockerImageSignatures = append(out.DockerImageSignatures, v) + } + } else { + out.DockerImageSignatures = nil + } + return nil } @@ -72,6 +84,8 @@ func Convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Sco out.DockerImageReference = in.DockerImageReference out.DockerImageManifest = in.DockerImageManifest + out.DockerImageManifestMediaType = in.DockerImageManifestMediaType + out.DockerImageConfig = in.DockerImageConfig version := in.DockerImageMetadataVersion if len(version) == 0 { @@ -95,6 +109,7 @@ func Convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Sco if in.DockerImageLayers != nil { out.DockerImageLayers = make([]newer.ImageLayer, len(in.DockerImageLayers)) for i := range in.DockerImageLayers { + out.DockerImageLayers[i].MediaType = in.DockerImageLayers[i].MediaType out.DockerImageLayers[i].Name = in.DockerImageLayers[i].Name out.DockerImageLayers[i].LayerSize = in.DockerImageLayers[i].LayerSize } @@ -113,6 +128,15 @@ func Convert_v1_Image_To_api_Image(in *Image, out *newer.Image, s conversion.Sco out.Signatures = nil } + if in.DockerImageSignatures != nil { + out.DockerImageSignatures = nil + for _, v := range in.DockerImageSignatures { + out.DockerImageSignatures = append(out.DockerImageSignatures, v) + } + } else { + out.DockerImageSignatures = nil + } + return nil } diff --git a/pkg/image/api/v1/conversion_generated.go b/pkg/image/api/v1/conversion_generated.go index e4c6950dd380..4e89ec9a6a9c 100644 --- a/pkg/image/api/v1/conversion_generated.go +++ b/pkg/image/api/v1/conversion_generated.go @@ -202,6 +202,7 @@ func Convert_api_ImageImportStatus_To_v1_ImageImportStatus(in *image_api.ImageIm func autoConvert_v1_ImageLayer_To_api_ImageLayer(in *ImageLayer, out *image_api.ImageLayer, s conversion.Scope) error { out.Name = in.Name out.LayerSize = in.LayerSize + out.MediaType = in.MediaType return nil } @@ -212,6 +213,7 @@ func Convert_v1_ImageLayer_To_api_ImageLayer(in *ImageLayer, out *image_api.Imag func autoConvert_api_ImageLayer_To_v1_ImageLayer(in *image_api.ImageLayer, out *ImageLayer, s conversion.Scope) error { out.Name = in.Name out.LayerSize = in.LayerSize + out.MediaType = in.MediaType return nil } diff --git a/pkg/image/api/v1/deep_copy_generated.go 
b/pkg/image/api/v1/deep_copy_generated.go index fa7565a1537f..ff8e7a72f4f5 100644 --- a/pkg/image/api/v1/deep_copy_generated.go +++ b/pkg/image/api/v1/deep_copy_generated.go @@ -93,6 +93,21 @@ func DeepCopy_v1_Image(in Image, out *Image, c *conversion.Cloner) error { } else { out.Signatures = nil } + if in.DockerImageSignatures != nil { + in, out := in.DockerImageSignatures, &out.DockerImageSignatures + *out = make([][]byte, len(in)) + for i := range in { + if newVal, err := c.DeepCopy(in[i]); err != nil { + return err + } else { + (*out)[i] = newVal.([]byte) + } + } + } else { + out.DockerImageSignatures = nil + } + out.DockerImageManifestMediaType = in.DockerImageManifestMediaType + out.DockerImageConfig = in.DockerImageConfig return nil } @@ -136,6 +151,7 @@ func DeepCopy_v1_ImageImportStatus(in ImageImportStatus, out *ImageImportStatus, func DeepCopy_v1_ImageLayer(in ImageLayer, out *ImageLayer, c *conversion.Cloner) error { out.Name = in.Name out.LayerSize = in.LayerSize + out.MediaType = in.MediaType return nil } diff --git a/pkg/image/api/v1/swagger_doc.go b/pkg/image/api/v1/swagger_doc.go index 86fa005a657b..27d8382da37f 100644 --- a/pkg/image/api/v1/swagger_doc.go +++ b/pkg/image/api/v1/swagger_doc.go @@ -19,14 +19,17 @@ func (DockerImageReference) SwaggerDoc() map[string]string { } var map_Image = map[string]string{ - "": "Image is an immutable representation of a Docker image and metadata at a point in time.", - "metadata": "Standard object's metadata.", - "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.", - "dockerImageMetadata": "DockerImageMetadata contains metadata about this image", - "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", - "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest", - "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data.", - "signatures": "Signatures holds all signatures of the image.", + "": "Image is an immutable representation of a Docker image and metadata at a point in time.", + "metadata": "Standard object's metadata.", + "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.", + "dockerImageMetadata": "DockerImageMetadata contains metadata about this image", + "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", + "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest", + "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data.", + "signatures": "Signatures holds all signatures of the image.", + "dockerImageSignatures": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", + "dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.", + "dockerImageConfig": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.", } func (Image) SwaggerDoc() map[string]string { @@ -57,9 +60,10 @@ func (ImageImportStatus) SwaggerDoc() map[string]string { } var map_ImageLayer = map[string]string{ - "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. 
Some may have none.", - "name": "Name of the layer as defined by the underlying store.", - "size": "Size of the layer in bytes as defined by the underlying store.", + "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.", + "name": "Name of the layer as defined by the underlying store.", + "size": "Size of the layer in bytes as defined by the underlying store.", + "mediaType": "MediaType of the referenced object.", } func (ImageLayer) SwaggerDoc() map[string]string { diff --git a/pkg/image/api/v1/types.go b/pkg/image/api/v1/types.go index c3b857189a3b..e88d91b23283 100644 --- a/pkg/image/api/v1/types.go +++ b/pkg/image/api/v1/types.go @@ -34,6 +34,12 @@ type Image struct { DockerImageLayers []ImageLayer `json:"dockerImageLayers"` // Signatures holds all signatures of the image. Signatures []ImageSignature `json:"signatures,omitempty"` + // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty"` + // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty"` + // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + DockerImageConfig string `json:"dockerImageConfig,omitempty"` } // ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. @@ -42,6 +48,8 @@ type ImageLayer struct { Name string `json:"name"` // Size of the layer in bytes as defined by the underlying store. LayerSize int64 `json:"size"` + // MediaType of the referenced object. + MediaType string `json:"mediaType"` } // ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims diff --git a/pkg/image/api/v1beta3/conversion.go b/pkg/image/api/v1beta3/conversion.go index ff3da2b97107..9350815ea192 100644 --- a/pkg/image/api/v1beta3/conversion.go +++ b/pkg/image/api/v1beta3/conversion.go @@ -21,6 +21,8 @@ func Convert_api_Image_To_v1beta3_Image(in *newer.Image, out *Image, s conversio out.DockerImageReference = in.DockerImageReference out.DockerImageManifest = in.DockerImageManifest + out.DockerImageManifestMediaType = in.DockerImageManifestMediaType + out.DockerImageConfig = in.DockerImageConfig gvString := in.DockerImageMetadataVersion if len(gvString) == 0 { @@ -51,6 +53,8 @@ func Convert_v1beta3_Image_To_api_Image(in *Image, out *newer.Image, s conversio out.DockerImageReference = in.DockerImageReference out.DockerImageManifest = in.DockerImageManifest + out.DockerImageManifestMediaType = in.DockerImageManifestMediaType + out.DockerImageConfig = in.DockerImageConfig version := in.DockerImageMetadataVersion if len(version) == 0 { diff --git a/pkg/image/api/v1beta3/types.go b/pkg/image/api/v1beta3/types.go index 3d072c8b4c4b..d027cf2739b6 100644 --- a/pkg/image/api/v1beta3/types.go +++ b/pkg/image/api/v1beta3/types.go @@ -31,6 +31,12 @@ type Image struct { DockerImageLayers []ImageLayer `json:"dockerImageLayers"` // Signatures holds all signatures of the image. Signatures []ImageSignature + // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty"` + // DockerImageManifestMediaType specifies the mediaType of manifest. 
This is a part of manifest schema v2. + DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty"` + // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + DockerImageConfig string `json:"dockerImageConfig,omitempty"` } // ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. @@ -39,6 +45,8 @@ type ImageLayer struct { Name string `json:"name"` // Size of the layer as defined by the underlying store. Size int64 `json:"size"` + // MediaType of the referenced object. + MediaType string `json:"mediaType"` } // ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims diff --git a/pkg/image/importer/client.go b/pkg/image/importer/client.go index 3e4dd8e0c9f3..1b51bb7333bb 100644 --- a/pkg/image/importer/client.go +++ b/pkg/image/importer/client.go @@ -16,6 +16,8 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" registryclient "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" @@ -71,6 +73,11 @@ type repositoryRetriever struct { } func (r *repositoryRetriever) Repository(ctx gocontext.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) { + named, err := reference.ParseNamed(repoName) + if err != nil { + return nil, err + } + t := r.context.Transport if insecure && r.context.InsecureTransport != nil { t = r.context.InsecureTransport @@ -107,7 +114,7 @@ func (r *repositoryRetriever) Repository(ctx gocontext.Context, registry *url.UR ), ) - repo, err := registryclient.NewRepository(context.Context(ctx), repoName, src.String(), rt) + repo, err := registryclient.NewRepository(context.Context(ctx), named, src.String(), rt) if err != nil { return nil, err } @@ -159,30 +166,54 @@ func schema1ToImage(manifest *schema1.SignedManifest, d digest.Digest) (*api.Ima if err != nil { return nil, err } + mediatype, payload, err := manifest.Payload() + if err != nil { + return nil, err + } + if len(d) > 0 { dockerImage.ID = d.String() } else { - if p, err := manifest.Payload(); err == nil { - d, err := digest.FromBytes(p) - if err != nil { - return nil, fmt.Errorf("unable to create digest from image payload: %v", err) - } - dockerImage.ID = d.String() - } else { - d, err := digest.FromBytes(manifest.Raw) - if err != nil { - return nil, fmt.Errorf("unable to create digest from image bytes: %v", err) - } - dockerImage.ID = d.String() - } + dockerImage.ID = digest.FromBytes(manifest.Canonical).String() } image := &api.Image{ ObjectMeta: kapi.ObjectMeta{ Name: dockerImage.ID, }, - DockerImageMetadata: *dockerImage, - DockerImageManifest: string(manifest.Raw), - DockerImageMetadataVersion: "1.0", + DockerImageMetadata: *dockerImage, + DockerImageManifest: string(payload), + DockerImageManifestMediaType: mediatype, + DockerImageMetadataVersion: "1.0", + } + + return image, nil +} + +func schema2ToImage(manifest *schema2.DeserializedManifest, imageConfig []byte, d digest.Digest) (*api.Image, error) { + mediatype, payload, err := manifest.Payload() + if err != nil { + return nil, err + } + + dockerImage, err := unmarshalDockerImage(imageConfig) + if err != nil { + return nil, err + } + if len(d) > 
0 { + dockerImage.ID = d.String() + } else { + dockerImage.ID = digest.FromBytes(payload).String() + } + + image := &api.Image{ + ObjectMeta: kapi.ObjectMeta{ + Name: dockerImage.ID, + }, + DockerImageMetadata: *dockerImage, + DockerImageManifest: string(payload), + DockerImageConfig: string(imageConfig), + DockerImageManifestMediaType: mediatype, + DockerImageMetadataVersion: "1.0", } return image, nil @@ -319,6 +350,11 @@ func (r *retryRepository) Blobs(ctx context.Context) distribution.BlobStore { return retryBlobStore{BlobStore: r.Repository.Blobs(ctx), repo: r} } +// Tags lists the tags under the named repository. +func (r *retryRepository) Tags(ctx context.Context) distribution.TagService { + return &retryTags{TagService: r.Repository.Tags(ctx), repo: r} +} + // retryManifest wraps the manifest service and invokes retries on the repo. type retryManifest struct { distribution.ManifestService @@ -326,9 +362,9 @@ type retryManifest struct { } // Exists returns true if the manifest exists. -func (r retryManifest) Exists(dgst digest.Digest) (bool, error) { +func (r retryManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { for { - if exists, err := r.ManifestService.Exists(dgst); r.repo.shouldRetry(err) { + if exists, err := r.ManifestService.Exists(ctx, dgst); r.repo.shouldRetry(err) { continue } else { return exists, err @@ -336,10 +372,10 @@ func (r retryManifest) Exists(dgst digest.Digest) (bool, error) { } } -// Get retrieves the identified by the digest, if it exists. -func (r retryManifest) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { +// Get retrieves the manifest identified by the digest, if it exists. +func (r retryManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { for { - if m, err := r.ManifestService.Get(dgst); r.repo.shouldRetry(err) { + if m, err := r.ManifestService.Get(ctx, dgst, options...); r.repo.shouldRetry(err) { continue } else { return m, err @@ -347,82 +383,73 @@ func (r retryManifest) Get(dgst digest.Digest) (*schema1.SignedManifest, error) } } -// Enumerate returns an array of manifest revisions in repository. -func (r retryManifest) Enumerate() ([]digest.Digest, error) { - for { - if d, err := r.ManifestService.Enumerate(); r.repo.shouldRetry(err) { - continue - } else { - return d, err - } - } +// retryBlobStore wraps the blob store and invokes retries on the repo. +type retryBlobStore struct { + distribution.BlobStore + repo *retryRepository } -// Tags lists the tags under the named repository. -func (r retryManifest) Tags() ([]string, error) { +func (r retryBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { for { - if t, err := r.ManifestService.Tags(); r.repo.shouldRetry(err) { + if d, err := r.BlobStore.Stat(ctx, dgst); r.repo.shouldRetry(err) { continue } else { - return t, err + return d, err } } } -// ExistsByTag returns true if the manifest exists. -func (r retryManifest) ExistsByTag(tag string) (bool, error) { +func (r retryBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error { for { - if exists, err := r.ManifestService.ExistsByTag(tag); r.repo.shouldRetry(err) { + if err := r.BlobStore.ServeBlob(ctx, w, req, dgst); r.repo.shouldRetry(err) { continue } else { - return exists, err + return err } } } -// GetByTag retrieves the named manifest, if it exists. 
-func (r retryManifest) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { +func (r retryBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { for { - if m, err := r.ManifestService.GetByTag(tag, options...); r.repo.shouldRetry(err) { + if rsc, err := r.BlobStore.Open(ctx, dgst); r.repo.shouldRetry(err) { continue } else { - return m, err + return rsc, err } } } -// retryManifest wraps the blob store and invokes retries on the repo. -type retryBlobStore struct { - distribution.BlobStore +type retryTags struct { + distribution.TagService repo *retryRepository } -func (r retryBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { +func (r *retryTags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { for { - if d, err := r.BlobStore.Stat(ctx, dgst); r.repo.shouldRetry(err) { + if t, err := r.TagService.Get(ctx, tag); r.repo.shouldRetry(err) { continue } else { - return d, err + return t, err } } } -func (r retryBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error { +func (r *retryTags) All(ctx context.Context) ([]string, error) { for { - if err := r.BlobStore.ServeBlob(ctx, w, req, dgst); r.repo.shouldRetry(err) { + if t, err := r.TagService.All(ctx); r.repo.shouldRetry(err) { continue } else { - return err + return t, err } } } -func (r retryBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { +func (r *retryTags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { for { - if rsc, err := r.BlobStore.Open(ctx, dgst); r.repo.shouldRetry(err) { + if t, err := r.TagService.Lookup(ctx, digest); r.repo.shouldRetry(err) { continue } else { - return rsc, err + return t, err } } } diff --git a/pkg/image/importer/client_test.go b/pkg/image/importer/client_test.go index 040456561152..5c73e6a1c0bc 100644 --- a/pkg/image/importer/client_test.go +++ b/pkg/image/importer/client_test.go @@ -16,6 +16,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" kapi "k8s.io/kubernetes/pkg/api" @@ -36,40 +37,38 @@ func (r *mockRetriever) Repository(ctx gocontext.Context, registry *url.URL, rep } type mockRepository struct { - repoErr, getErr, getByTagErr, tagsErr, err error + repoErr, getErr, getTagErr, tagErr, untagErr, allTagErr, err error blobs *mockBlobStore manifest *schema1.SignedManifest - tags []string + tags map[string]string } func (r *mockRepository) Name() string { return "test" } +func (r *mockRepository) Named() reference.Named { + named, _ := reference.WithName("test") + return named +} func (r *mockRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { return r, r.repoErr } func (r *mockRepository) Blobs(ctx context.Context) distribution.BlobStore { return r.blobs } -func (r *mockRepository) Signatures() distribution.SignatureService { return nil } -func (r *mockRepository) Exists(dgst digest.Digest) (bool, error) { +func (r *mockRepository) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { return false, r.getErr } -func (r *mockRepository) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { +func (r *mockRepository) Get(ctx context.Context, 
dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { return r.manifest, r.getErr } -func (r *mockRepository) Enumerate() ([]digest.Digest, error) { - return nil, r.getErr -} -func (r *mockRepository) Delete(dgst digest.Digest) error { return fmt.Errorf("not implemented") } -func (r *mockRepository) Put(manifest *schema1.SignedManifest) error { +func (r *mockRepository) Delete(ctx context.Context, dgst digest.Digest) error { return fmt.Errorf("not implemented") } -func (r *mockRepository) Tags() ([]string, error) { return r.tags, r.tagsErr } -func (r *mockRepository) ExistsByTag(tag string) (bool, error) { - return false, r.tagsErr +func (r *mockRepository) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + return "", fmt.Errorf("not implemented") } -func (r *mockRepository) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - return r.manifest, r.getByTagErr +func (r *mockRepository) Tags(ctx context.Context) distribution.TagService { + return &mockTagService{repo: r} } type mockBlobStore struct { @@ -90,6 +89,48 @@ func (r *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribut return nil, r.openErr } +type mockTagService struct { + distribution.TagService + + repo *mockRepository +} + +func (r *mockTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + v, ok := r.repo.tags[tag] + if !ok { + return distribution.Descriptor{}, r.repo.getTagErr + } + dgst, err := digest.ParseDigest(v) + if err != nil { + panic(err) + } + return distribution.Descriptor{Digest: dgst}, r.repo.getTagErr +} + +func (r *mockTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + r.repo.tags[tag] = desc.Digest.String() + return r.repo.tagErr +} + +func (r *mockTagService) Untag(ctx context.Context, tag string) error { + if _, ok := r.repo.tags[tag]; ok { + delete(r.repo.tags, tag) + } + return r.repo.untagErr +} + +func (r *mockTagService) All(ctx context.Context) (res []string, err error) { + err = r.repo.allTagErr + for tag := range r.repo.tags { + res = append(res, tag) + } + return +} + +func (r *mockTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + func TestSchema1ToImage(t *testing.T) { m := &schema1.SignedManifest{} if err := json.Unmarshal([]byte(etcdManifest), m); err != nil { @@ -270,21 +311,19 @@ func TestRetryFailure(t *testing.T) { } // do not retry on non standard errors - repo = &mockRepository{getByTagErr: fmt.Errorf("does not support v2 API")} + repo = &mockRepository{getErr: fmt.Errorf("does not support v2 API")} r = NewRetryRepository(repo, 4, 0).(*retryRepository) m, err := r.Manifests(nil) if err != nil { t.Fatal(err) } - if m, err := m.GetByTag("test"); m != nil || err != repo.getByTagErr || r.retries != 4 { + if _, err := m.Get(nil, digest.Digest("foo")); err != repo.getErr || r.retries != 4 { t.Fatalf("unexpected: %v %v %#v", m, err, r) } // retry four times repo = &mockRepository{ - getByTagErr: errcode.ErrorCodeUnauthorized, - getErr: errcode.ErrorCodeUnauthorized, - tagsErr: errcode.ErrorCodeUnauthorized, + getErr: errcode.ErrorCodeUnauthorized, blobs: &mockBlobStore{ serveErr: errcode.ErrorCodeUnauthorized, statErr: errcode.ErrorCodeUnauthorized, @@ -295,27 +334,12 @@ func TestRetryFailure(t *testing.T) { if m, err = r.Manifests(nil); err != 
nil { t.Fatal(err) } - if m, err := m.GetByTag("test"); m != nil || err != repo.getByTagErr || r.retries != 0 { - t.Fatalf("unexpected: %v %v %#v", m, err, r) - } r.retries = 2 - if m, err := m.Get(digest.Digest("foo")); m != nil || err != repo.getErr || r.retries != 0 { - t.Fatalf("unexpected: %v %v %#v", m, err, r) + if _, err := m.Get(nil, digest.Digest("foo")); err != repo.getErr || r.retries != 0 { + t.Fatalf("unexpected: %v %#v", err, r) } r.retries = 2 - if m, err := m.Exists("foo"); m || err != repo.getErr || r.retries != 0 { - t.Fatalf("unexpected: %v %v %#v", m, err, r) - } - r.retries = 2 - if m, err := m.Enumerate(); m != nil || err != repo.getErr || r.retries != 0 { - t.Fatalf("unexpected: %v %v %#v", m, err, r) - } - r.retries = 2 - if m, err := m.ExistsByTag("foo"); m || err != repo.getErr || r.retries != 0 { - t.Fatalf("unexpected: %v %v %#v", m, err, r) - } - r.retries = 2 - if m, err := m.Tags(); m != nil || err != repo.tagsErr || r.retries != 0 { + if m, err := m.Exists(nil, "foo"); m || err != repo.getErr || r.retries != 0 { t.Fatalf("unexpected: %v %v %#v", m, err, r) } @@ -324,15 +348,15 @@ func TestRetryFailure(t *testing.T) { if err != nil { t.Fatal(err) } - if _, err := b.Stat(nil, digest.Digest("x")); err != repo.getByTagErr || r.retries != 0 { - t.Fatalf("unexpected: %v %v %#v", m, err, r) + if _, err := b.Stat(nil, digest.Digest("x")); err != repo.blobs.statErr || r.retries != 0 { + t.Fatalf("unexpected: %v %#v", err, r) } r.retries = 2 - if err := b.ServeBlob(nil, nil, nil, digest.Digest("foo")); err != repo.getErr || r.retries != 0 { - t.Fatalf("unexpected: %v %v %#v", m, err, r) + if err := b.ServeBlob(nil, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr || r.retries != 0 { + t.Fatalf("unexpected: %v %#v", err, r) } r.retries = 2 - if _, err := b.Open(nil, digest.Digest("foo")); err != repo.getErr || r.retries != 0 { - t.Fatalf("unexpected: %v %v %#v", m, err, r) + if _, err := b.Open(nil, digest.Digest("foo")); err != repo.blobs.openErr || r.retries != 0 { + t.Fatalf("unexpected: %v %#v", err, r) } } diff --git a/pkg/image/importer/credentials.go b/pkg/image/importer/credentials.go index ab932105fdf8..4bbb930520a0 100644 --- a/pkg/image/importer/credentials.go +++ b/pkg/image/importer/credentials.go @@ -26,6 +26,15 @@ func (s *noopCredentialStore) Basic(url *url.URL) (string, string) { return "", "" } +func (s *noopCredentialStore) RefreshToken(url *url.URL, service string) string { + glog.Infof("asked to provide RefreshToken for %s", url) + return "" +} + +func (s *noopCredentialStore) SetRefreshToken(url *url.URL, service string, token string) { + glog.Infof("asked to provide SetRefreshToken for %s", url) +} + func NewBasicCredentials() *BasicCredentials { return &BasicCredentials{} } @@ -56,6 +65,13 @@ func (c *BasicCredentials) Basic(url *url.URL) (string, string) { return "", "" } +func (c *BasicCredentials) RefreshToken(url *url.URL, service string) string { + return "" +} + +func (c *BasicCredentials) SetRefreshToken(url *url.URL, service string, token string) { +} + func NewLocalCredentials() auth.CredentialStore { return &keyringCredentialStore{credentialprovider.NewDockerKeyring()} } @@ -68,6 +84,13 @@ func (s *keyringCredentialStore) Basic(url *url.URL) (string, string) { return basicCredentialsFromKeyring(s.DockerKeyring, url) } +func (s *keyringCredentialStore) RefreshToken(url *url.URL, service string) string { + return "" +} + +func (s *keyringCredentialStore) SetRefreshToken(url *url.URL, service string, token string) { +} + func 
NewCredentialsForSecrets(secrets []kapi.Secret) *SecretCredentialStore { return &SecretCredentialStore{secrets: secrets} } @@ -88,6 +111,13 @@ func (s *SecretCredentialStore) Basic(url *url.URL) (string, string) { return basicCredentialsFromKeyring(s.init(), url) } +func (s *SecretCredentialStore) RefreshToken(url *url.URL, service string) string { + return "" +} + +func (s *SecretCredentialStore) SetRefreshToken(url *url.URL, service string, token string) { +} + func (s *SecretCredentialStore) Err() error { s.lock.Lock() defer s.lock.Unlock() diff --git a/pkg/image/importer/importer.go b/pkg/image/importer/importer.go index f11030b5703c..c715d822a4ab 100644 --- a/pkg/image/importer/importer.go +++ b/pkg/image/importer/importer.go @@ -10,6 +10,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" @@ -282,6 +284,21 @@ func applyErrorToRepository(repository *importRepository, err error) { } } +func formatRepositoryError(repository *importRepository, refName string, refID string, defErr error) (err error) { + err = defErr + switch { + case isDockerError(err, v2.ErrorCodeManifestUnknown): + ref := repository.Ref + ref.Tag, ref.ID = refName, refID + err = kapierrors.NewNotFound(api.Resource("dockerimage"), ref.Exact()) + case isDockerError(err, errcode.ErrorCodeUnauthorized): + err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact())) + case strings.HasSuffix(err.Error(), "no basic auth credentials"): + err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact())) + } + return +} + // importRepositoryFromDocker loads the tags and images requested in the passed importRepository, obeying the // optional rate limiter. Errors are set onto the individual tags and digest objects. 
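
The importer hunks that follow drop the removed ManifestService.GetByTag in favor of a two-step lookup: resolve the tag to a descriptor through the TagService, then fetch the manifest by digest. A hedged sketch of that flow, using only calls that appear in this patch; the helper name is illustrative and the sketch assumes the imports already present in importer.go (including the gocontext alias):

// manifestForTag resolves a tag to its descriptor via the TagService and then
// fetches the manifest by digest — the v2.4-style replacement for GetByTag.
func manifestForTag(ctx gocontext.Context, repo distribution.Repository, tag string) (distribution.Manifest, digest.Digest, error) {
	desc, err := repo.Tags(ctx).Get(ctx, tag)
	if err != nil {
		// e.g. a missing tag surfaces as v2.ErrorCodeManifestUnknown
		return nil, "", err
	}
	s, err := repo.Manifests(ctx)
	if err != nil {
		return nil, "", err
	}
	m, err := s.Get(ctx, desc.Digest)
	return m, desc.Digest, err
}
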
func importRepositoryFromDocker(ctx gocontext.Context, retriever RepositoryRetriever, repository *importRepository, limiter flowcontrol.RateLimiter) { @@ -325,9 +342,12 @@ func importRepositoryFromDocker(ctx gocontext.Context, retriever RepositoryRetri return } + // get a blob context + b := repo.Blobs(ctx) + // if repository import is requested (MaximumTags), attempt to load the tags, sort them, and request the first N if count := repository.MaximumTags; count > 0 || count == -1 { - tags, err := s.Tags() + tags, err := repo.Tags(ctx).All(ctx) if err != nil { glog.V(5).Infof("unable to access tags for repository %#v: %#v", repository, err) switch { @@ -372,27 +392,40 @@ func importRepositoryFromDocker(ctx gocontext.Context, retriever RepositoryRetri continue } limiter.Accept() - m, err := s.Get(d) + manifest, err := s.Get(ctx, d) if err != nil { glog.V(5).Infof("unable to access digest %q for repository %#v: %#v", d, repository, err) - switch { - case isDockerError(err, v2.ErrorCodeManifestUnknown): - ref := repository.Ref - ref.Tag, ref.ID = "", importDigest.Name - err = kapierrors.NewNotFound(api.Resource("dockerimage"), ref.Exact()) - case isDockerError(err, errcode.ErrorCodeUnauthorized): - err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact())) - case strings.HasSuffix(err.Error(), "no basic auth credentials"): - err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact())) + importDigest.Err = formatRepositoryError(repository, "", importDigest.Name, err) + continue + } + + if signedManifest, isSchema1 := manifest.(*schema1.SignedManifest); isSchema1 { + importDigest.Image, err = schema1ToImage(signedManifest, d) + } else if deserializedManifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 { + imageConfig, err := b.Get(ctx, deserializedManifest.Config.Digest) + if err != nil { + glog.V(5).Infof("unable to access the image config using digest %q for repository %#v: %#v", d, repository, err) + if isDockerError(err, v2.ErrorCodeManifestUnknown) { + ref := repository.Ref + ref.ID = deserializedManifest.Config.Digest.String() + importDigest.Err = kapierrors.NewNotFound(api.Resource("dockerimage"), ref.Exact()) + } else { + importDigest.Err = formatRepositoryError(repository, "", importDigest.Name, err) + } + continue } - importDigest.Err = err + + importDigest.Image, err = schema2ToImage(deserializedManifest, imageConfig, d) + } else { + glog.V(5).Infof("unsupported manifest type: %T", manifest) continue } - importDigest.Image, err = schema1ToImage(m, d) + if err != nil { importDigest.Err = err continue } + if err := api.ImageWithMetadata(importDigest.Image); err != nil { importDigest.Err = err continue @@ -405,23 +438,34 @@ func importRepositoryFromDocker(ctx gocontext.Context, retriever RepositoryRetri continue } limiter.Accept() - m, err := s.GetByTag(importTag.Name) + desc, err := repo.Tags(ctx).Get(ctx, importTag.Name) if err != nil { - glog.V(5).Infof("unable to access tag %q for repository %#v: %#v", importTag.Name, repository, err) - switch { - case isDockerError(err, v2.ErrorCodeManifestUnknown): - ref := repository.Ref - ref.Tag = importTag.Name - err = kapierrors.NewNotFound(api.Resource("dockerimage"), ref.Exact()) - case isDockerError(err, errcode.ErrorCodeUnauthorized): - err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact())) - case strings.HasSuffix(err.Error(), "no 
basic auth credentials"): - err = kapierrors.NewUnauthorized(fmt.Sprintf("you may not have access to the Docker image %q", repository.Ref.Exact())) + glog.V(5).Infof("unable to get tag %q for repository %#v: %#v", importTag.Name, repository, err) + importTag.Err = formatRepositoryError(repository, importTag.Name, "", err) + continue + } + manifest, err := s.Get(ctx, desc.Digest) + if err != nil { + glog.V(5).Infof("unable to access digest %q for tag %q for repository %#v: %#v", desc.Digest, importTag.Name, repository, err) + importTag.Err = formatRepositoryError(repository, importTag.Name, "", err) + continue + } + + if signedManifest, isSchema1 := manifest.(*schema1.SignedManifest); isSchema1 { + importTag.Image, err = schema1ToImage(signedManifest, "") + } else if deserializedManifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 { + imageConfig, err := b.Get(ctx, deserializedManifest.Config.Digest) + if err != nil { + glog.V(5).Infof("unable to access image config using digest %q for tag %q for repository %#v: %#v", desc.Digest, importTag.Name, repository, err) + importTag.Err = formatRepositoryError(repository, importTag.Name, "", err) + continue } - importTag.Err = err + importTag.Image, err = schema2ToImage(deserializedManifest, imageConfig, "") + } else { + glog.V(5).Infof("unsupported manifest type: %T", manifest) continue } - importTag.Image, err = schema1ToImage(m, "") + if err != nil { importTag.Err = err continue diff --git a/pkg/image/importer/importer_test.go b/pkg/image/importer/importer_test.go index 0cc8b0b5d987..81ca7b27f08b 100644 --- a/pkg/image/importer/importer_test.go +++ b/pkg/image/importer/importer_test.go @@ -32,14 +32,14 @@ func expectStatusError(status unversioned.Status, message string) bool { } func TestImport(t *testing.T) { - m := &schema1.SignedManifest{Raw: []byte(etcdManifest)} + m := &schema1.SignedManifest{} if err := json.Unmarshal([]byte(etcdManifest), m); err != nil { t.Fatal(err) } insecureRetriever := &mockRetriever{ repo: &mockRepository{ - getByTagErr: fmt.Errorf("no such tag"), - getErr: fmt.Errorf("no such digest"), + getTagErr: fmt.Errorf("no such tag"), + getErr: fmt.Errorf("no such digest"), }, } testCases := []struct { @@ -65,8 +65,8 @@ func TestImport(t *testing.T) { { retriever: &mockRetriever{ repo: &mockRepository{ - getByTagErr: fmt.Errorf("no such tag"), - getErr: fmt.Errorf("no such digest"), + getTagErr: fmt.Errorf("no such tag"), + getErr: fmt.Errorf("no such digest"), }, }, isi: api.ImageStreamImport{ @@ -158,8 +158,16 @@ func TestImport(t *testing.T) { { retriever: &mockRetriever{ repo: &mockRepository{ - tags: []string{"v1", "other", "v2", "3", "3.1", "abc"}, - getByTagErr: fmt.Errorf("no such tag"), + manifest: m, + tags: map[string]string{ + "v1": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", + "other": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", + "v2": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", + "3": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", + "3.1": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", + "abc": "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", + }, + getTagErr: fmt.Errorf("no such tag"), }, }, isi: api.ImageStreamImport{ diff --git a/pkg/image/registry/image/etcd/etcd_test.go b/pkg/image/registry/image/etcd/etcd_test.go index 15348b40cf03..22c1af6a448a 100644 --- a/pkg/image/registry/image/etcd/etcd_test.go +++ 
b/pkg/image/registry/image/etcd/etcd_test.go @@ -111,8 +111,7 @@ func TestWatch(t *testing.T) { ) } -const etcdManifest = ` -{ +const etcdManifest = `{ "schemaVersion": 1, "tag": "latest", "name": "coreos/etcd", @@ -161,8 +160,7 @@ const etcdManifest = ` ] }` -const etcdManifestNoSignature = ` -{ +const etcdManifestNoSignature = `{ "schemaVersion": 1, "tag": "latest", "name": "coreos/etcd", @@ -362,13 +360,13 @@ func TestUpdateResetsMetadata(t *testing.T) { return true }, existing: &api.Image{ - ObjectMeta: kapi.ObjectMeta{Name: "sha256:54820434e2ccd1596892668504fef12ed980f0cc312f60eac93d6864445ba123", ResourceVersion: "1"}, + ObjectMeta: kapi.ObjectMeta{Name: "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", ResourceVersion: "1"}, DockerImageReference: "openshift/ruby-19-centos-2", DockerImageLayers: []api.ImageLayer{}, DockerImageManifest: etcdManifestNoSignature, }, image: &api.Image{ - ObjectMeta: kapi.ObjectMeta{Name: "sha256:54820434e2ccd1596892668504fef12ed980f0cc312f60eac93d6864445ba123", ResourceVersion: "1"}, + ObjectMeta: kapi.ObjectMeta{Name: "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238", ResourceVersion: "1"}, DockerImageReference: "openshift/ruby-19-centos", DockerImageMetadata: api.DockerImage{ID: "foo"}, DockerImageManifest: etcdManifest, diff --git a/pkg/image/registry/image/strategy.go b/pkg/image/registry/image/strategy.go index c267199cd1b1..6d9b379d90f3 100644 --- a/pkg/image/registry/image/strategy.go +++ b/pkg/image/registry/image/strategy.go @@ -75,6 +75,13 @@ func (imageStrategy) PrepareForUpdate(obj, old runtime.Object) { newImage.DockerImageLayers = oldImage.DockerImageLayers newImage.Signatures = oldImage.Signatures + if oldImage.DockerImageSignatures != nil { + newImage.DockerImageSignatures = nil + for _, v := range oldImage.DockerImageSignatures { + newImage.DockerImageSignatures = append(newImage.DockerImageSignatures, v) + } + } + // allow an image update that results in the manifest matching the digest (the name) newManifest := newImage.DockerImageManifest newImage.DockerImageManifest = oldImage.DockerImageManifest @@ -87,6 +94,17 @@ func (imageStrategy) PrepareForUpdate(obj, old runtime.Object) { } } + newImageConfig := newImage.DockerImageConfig + newImage.DockerImageConfig = oldImage.DockerImageConfig + if newImageConfig != oldImage.DockerImageConfig && len(newImageConfig) > 0 { + ok, err := api.ImageConfigMatchesImage(newImage, []byte(newImageConfig)) + if err != nil { + utilruntime.HandleError(fmt.Errorf("attempted to validate that a new config for %q mentioned in the manifest, but failed: %v", oldImage.Name, err)) + } else if ok { + newImage.DockerImageConfig = newImageConfig + } + } + if err := api.ImageWithMetadata(newImage); err != nil { utilruntime.HandleError(fmt.Errorf("Unable to update image metadata for %q: %v", newImage.Name, err)) } diff --git a/test/end-to-end/core.sh b/test/end-to-end/core.sh index 0be3eb1f1976..96d256afd884 100755 --- a/test/end-to-end/core.sh +++ b/test/end-to-end/core.sh @@ -220,6 +220,14 @@ echo "[INFO] Run pod diagnostics" # Requires a node to run the origin-deployer pod; expects registry deployed, deployer image pulled os::cmd::expect_success_and_text 'oadm diagnostics DiagnosticPod --images='"'""${USE_IMAGES}""'" 'Running diagnostic: PodCheckDns' +# This needed because docker 1.10+ utilizes cross-repo mount feature. Docker client keeps track of source +# repositories for all the blobs. 
If it uploads a blob to a repository of registry having an entry in this +# mapping, it will request a mount from this repository. The authorization check is done for both destination +# and mounted repository. Without the next statement, the auth check for mounted repository +# (cache/ruby-22-centos7) will fail. +# TODO: remove this once the registry can access global blob store without a layer link authorization check +oc policy -n cache add-role-to-user system:image-puller system:serviceaccount:test:builder + echo "[INFO] Applying STI application config" os::cmd::expect_success "oc create -f ${STI_CONFIG_FILE}" diff --git a/test/integration/imageimporter_test.go b/test/integration/imageimporter_test.go index 444b08e07e6e..b58e84781f4b 100644 --- a/test/integration/imageimporter_test.go +++ b/test/integration/imageimporter_test.go @@ -179,7 +179,13 @@ func mockRegistryHandler(t *testing.T, requireAuth bool, count *int) http.Handle case "/v2/test/image3/manifests/latest", "/v2/test/image3/manifests/v2", "/v2/test/image3/manifests/" + danglingDigest: errcode.ServeJSON(w, errcode.ErrorCodeUnknown) case "/v2/test/image3/manifests/v1", "/v2/test/image3/manifests/" + etcdDigest: - w.Write([]byte(etcdManifest)) + if r.Method == "HEAD" { + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(etcdManifest))) + w.Header().Set("Docker-Content-Digest", etcdDigest) + w.WriteHeader(http.StatusOK) + } else { + w.Write([]byte(etcdManifest)) + } default: t.Fatalf("unexpected request %s: %#v", r.URL.Path, r) } @@ -462,21 +468,29 @@ func TestImageStreamImportScheduled(t *testing.T) { case "/v2/": w.Header().Set("Docker-Distribution-API-Version", "registry/2.0") w.Write([]byte(`{}`)) - case "/v2/test/image/manifests/latest": + case "/v2/test/image/manifests/latest", "/v2/test/image/manifests/" + etcdDigest, "/v2/test/image/manifests/" + phpDigest: count++ t.Logf("serving %d", count) - var manifest string + var manifest, digest string switch count { - case 1: + case 1, 2: + digest = etcdDigest manifest = etcdManifest - case 2, 3: + case 3, 4, 5, 6: + digest = phpDigest manifest = phpManifest default: w.WriteHeader(500) return } - written <- struct{}{} + if r.Method == "HEAD" { + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(manifest))) + w.Header().Set("Docker-Content-Digest", digest) + w.WriteHeader(http.StatusOK) + return + } w.Write([]byte(manifest)) + written <- struct{}{} default: t.Fatalf("unexpected request %s: %#v", r.URL.Path, r) } diff --git a/test/integration/v2_docker_registry_test.go b/test/integration/v2_docker_registry_test.go index f29a5f13138a..8e869ac07925 100644 --- a/test/integration/v2_docker_registry_test.go +++ b/test/integration/v2_docker_registry_test.go @@ -51,10 +51,7 @@ func signedManifest(name string) ([]byte, digest.Digest, error) { if err != nil { return []byte{}, "", fmt.Errorf("error marshaling manifest: %s", err) } - dgst, err := digest.FromBytes(manifestBytes) - if err != nil { - return []byte{}, "", fmt.Errorf("error calculating manifest digest: %s", err) - } + dgst := digest.FromBytes(manifestBytes) jsonSignature, err := libtrust.NewJSONSignature(manifestBytes) if err != nil { @@ -98,7 +95,8 @@ func TestV2RegistryGetTags(t *testing.T) { } config := `version: 0.1 -loglevel: debug +log: + level: debug http: addr: 127.0.0.1:5000 storage: @@ -106,8 +104,12 @@ storage: auth: openshift: middleware: + registry: + - name: openshift repository: - name: openshift + storage: + - name: openshift ` os.Setenv("OPENSHIFT_CA_DATA", string(clusterAdminClientConfig.CAData)) diff 
--git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml index 07d151a7352d..408ac8c88d39 100644 --- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml @@ -1405,6 +1405,7 @@ items: attributeRestrictions: null resources: - images + - imagestreamtags verbs: - delete - get @@ -1414,7 +1415,6 @@ items: resources: - imagestreamimages - imagestreams/secrets - - imagestreamtags verbs: - get - apiGroups: